From: <btm...@us...> - 2010-07-20 22:31:12
Revision: 3256 http://bigdata.svn.sourceforge.net/bigdata/?rev=3256&view=rev Author: btmurphy Date: 2010-07-20 22:31:05 +0000 (Tue, 20 Jul 2010) Log Message: ----------- [trunk - merged from branch bugfix-btm]: trac #108 - Update NicUtil and the build.xml file to parameterize them for the ethernet interface Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config trunk/build.xml Property Changed: ---------------- trunk/ Property changes on: trunk ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/bugfix-btm:2594-2779 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 + /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 Modified: trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java 2010-07-20 22:28:30 UTC (rev 3255) +++ trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java 2010-07-20 22:31:05 UTC (rev 3256) @@ -505,6 +505,185 @@ } /** + * Examines each address associated with each network interface + * card (nic) installed on the current node, and returns the + * <code>String</code> value of the first such address that is + * determined to be both <i>reachable</i> and an address type + * that represents an <i>IPv4</i> address. + * + * This method will always first examine addresses that are + * <i>not</i> the <i>loopback</i> address (<i>local host</i>); + * returning a loopback adddress only if <code>true</code> + * is input for the <code>loopbackOk</code> parameter, and + * none of the non-loopback addresses satisfy this method's + * search criteria. + * + * If this method fails to find any address that satisfies the + * above criteria, then this method returns <code>null</code>. + * + * @param loopbackOk if <code>true</code>, then upon failure + * find an non-<i>loopback</i> address that + * satisfies this method's search criteria + * (an IPv4 type address and reachable), the + * first loopback address that is found to be + * reachable is returned. + * + * If <code>false</code> is input for this + * parameter, then this method will examine + * only those addresses that do <i>not</i> + * correspond to the corresponding nic's + * loopback address. + * + * @return a <code>String</code> value representing the first + * network interface address installed on the current + * node that is determined to be both <i>reachable</i> + * and an IPv4 type address; where the return value + * corresponds to a <i>loopback</i> address only if + * <code>true</code> is input for the <code>loopbackOk</code> + * parameter, and no non-loopback address satisfying + * the desired criteria can be found. If this method + * fails to find any address that satisfies the desired + * criteria, then <code>null</code> is returned. 
+ * + * @throws SocketException if there is an error in the underlying + * I/O subsystem and/or protocol while retrieving the + * the network interfaces currently installed on the + * node. + * + * @throws IOException if a network error occurs while determining + * if a candidate return address is <i>reachable</i>. + */ + public static String getDefaultIpv4Address(boolean loopbackOk) + throws SocketException, IOException + { + //get all nics on the current node + Enumeration<NetworkInterface> nics = + NetworkInterface.getNetworkInterfaces(); + while( nics.hasMoreElements() ) { + NetworkInterface curNic = nics.nextElement(); + List<InterfaceAddress> interfaceAddrs = + curNic.getInterfaceAddresses(); + for(InterfaceAddress interfaceAddr : interfaceAddrs) { + InetAddress inetAddr = interfaceAddr.getAddress(); + boolean isIpv4 = inetAddr instanceof Inet4Address; + boolean isLoopbackAddress = inetAddr.isLoopbackAddress(); + if(isIpv4) { + if(isLoopbackAddress) continue; + boolean isReachable = inetAddr.isReachable(3*1000); + Inet4Address inet4Addr = (Inet4Address)inetAddr; + String retVal = inet4Addr.getHostAddress(); + + jiniConfigLogger.log + (CONFIG, "default IPv4 address: "+retVal); + utilLogger.log + (Level.TRACE, "default IPv4 address: "+retVal); + return retVal; + } + } + } + + if(!loopbackOk) return null; + + nics = NetworkInterface.getNetworkInterfaces(); + while( nics.hasMoreElements() ) { + NetworkInterface curNic = nics.nextElement(); + List<InterfaceAddress> interfaceAddrs = + curNic.getInterfaceAddresses(); + for(InterfaceAddress interfaceAddr : interfaceAddrs) { + InetAddress inetAddr = interfaceAddr.getAddress(); + boolean isIpv4 = inetAddr instanceof Inet4Address; + boolean isLoopbackAddress = inetAddr.isLoopbackAddress(); + if(isIpv4) { + if(!isLoopbackAddress) continue; + boolean isReachable = inetAddr.isReachable(3*1000); + Inet4Address inet4Addr = (Inet4Address)inetAddr; + String retVal = inet4Addr.getHostAddress(); + + jiniConfigLogger.log + (CONFIG, "default IPv4 address: "+retVal); + utilLogger.log + (Level.TRACE, "default IPv4 address: "+retVal); + return retVal; + } + } + } + return null; + } + + public static String getDefaultIpv4Address() + throws SocketException, IOException + { + return getDefaultIpv4Address(false);//localhost NOT ok + } + + /** + * Special-purpose convenience method that will return the + * value of the system property named <code>default.nic</code> + * if that property has been set on the current VM to any + * non-<code>null</code> value other than the value, + * <code>${default.nic}</code>; otherwise returns the value + * input for the <code>defaultVal</code> parameter. + * <p> + * This method can be called from within a configuration + * as well as from within program control. It is provided + * as a way to allow mechanisms that are not able to + * conditionally set system properties to always set the + * system property named <code>default.nic</code> to some + * value -- either valid or invalid -- depending on whether + * that property is set as a system property on the command + * line. + * <p> + * For example, the <code>java</code> ant task used to exec + * java based programs does not allow one to set a system + * property on that program's VM when that property is set + * on the command line used to execute ant, but leave that + * property un-set when ant's command line does not specify + * a value for that property. 
That is, although ant allows + * one to retrieve the value of a system property that is + * specified on ant's command line and substitute that value + * (using Java's '${}' token-substitution mechanism) into + * a system property set on the program's VM, ant does not + * allow one to leave that property un-set when the system + * property is not specified on ant's command line; the + * property must either always be set on the VM, or never + * set. If the ant script then is written to always set the + * system property on the exec'd program's VM to the + * value of the system property expected on ant's command + * line, then whenever that system property is not set on + * ant's command line, the value that is substituted into + * the system property when the program is exec-ed will be + * of the form, <code>${default.nic}</code>. If the program + * that is exec'd then attempts to call + * <code>System.getProperty("default.nic")</code>, that + * call will return the non-<code>null</code> value, + * <code>${default.nic}</code>; which is typically not + * what is desired. + * + * @param defaultVal <code>String</code> value that this method + * returns if the system property named + * <code>default.nic</code> is either not + * set or is equal to the value + * <code>${default.nic}</code>. + * + * @return a <code>String</code> value equal to the value of + * the system property named <code>${default.nic}</code> + * if that system property is set to any value except + * <code>null</code> or the value <code>${default.nic}</code>; + * otherwise, returns the value input for the + * <code>defaultVal</code> parameter. + */ + public static String getDefaultNic(String defaultVal) { + String defaultNic = System.getProperty("default.nic"); + if( ("${default.nic}").equals(defaultNic) ) defaultNic = null; + if(defaultNic == null) defaultNic = defaultVal; + return defaultNic; + } + + public static String getDefaultNic() { + return getDefaultNic(null); + } + + /** * Intended for use by scripts. 
*/ public static void main(String[] args) { Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java 2010-07-20 22:28:30 UTC (rev 3255) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java 2010-07-20 22:31:05 UTC (rev 3256) @@ -70,8 +70,19 @@ private String jiniLibDl = System.getProperty("jini.lib.dl"); private String localPolicy = System.getProperty("java.security.policy"); - private static String thisHost = NicUtil.getIpAddress("eth0"); - private static String defaultGroup = "bigdata.fedname-"+thisHost; + private static String thisHost = null; + private static String defaultGroup = null; + static { + try { + thisHost = + NicUtil.getIpAddress + ( NicUtil.getDefaultNic + ( NicUtil.getDefaultIpv4Address(true) ) ); + defaultGroup = + System.getProperty("bigdata.fedname", + "bigdata.test.group-"+thisHost); + } catch (Throwable t) { /* swallow */ } + } private static String defaultCodebasePort = "23333"; private static String group = Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config 2010-07-20 22:28:30 UTC (rev 3255) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config 2010-07-20 22:31:05 UTC (rev 3256) @@ -8,10 +8,10 @@ import com.bigdata.util.config.NicUtil; com.sun.jini.reggie { - private static exportNic = "eth0"; private static exportPort = 0;//randomly chosen port - private static exportHost = NicUtil.getIpAddress(exportNic); - private static exportIpAddr = NicUtil.getIpAddress(exportNic,0,exportHost); + private static exportIpAddr = + NicUtil.getDefaultNic + ( NicUtil.getDefaultIpv4Address(true) ); private static codebasePort = 23333;//value used if not set by system prop // Public configuration entries @@ -21,7 +21,9 @@ new BasicILFactory(), false, true); initialMemberGroups = - new String[] {"bigdata.test.group-"+exportHost};//use if no sys prop + new String[] { System.getProperty + ("bigdata.fedname", + "bigdata.test.group-"+exportIpAddr ) }; unicastDiscoveryHost = exportIpAddr; } Modified: trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config =================================================================== --- trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config 2010-07-20 22:28:30 UTC (rev 3255) +++ trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config 2010-07-20 22:31:05 UTC (rev 3256) @@ -76,7 +76,9 @@ // The default root for all persistent state. static serviceDir = ConfigMath.getAbsoluteFile(new File(fedname)); - private static localIpAddr = NicUtil.getIpAddress("eth0", 0, null, true); + private static localIpAddr = + NicUtil.getDefaultNic + ( NicUtil.getDefaultIpv4Address(true) ); /* * Example cluster configuration. 
Modified: trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config 2010-07-20 22:28:30 UTC (rev 3255) +++ trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config 2010-07-20 22:31:05 UTC (rev 3256) @@ -14,7 +14,9 @@ com.sun.jini.reggie { - private static exportIpAddr = NicUtil.getIpAddress("eth0", 0, null, true); + private static exportIpAddr = + NicUtil.getDefaultNic + ( NicUtil.getDefaultIpv4Address(true) ); private static exportPort = Integer.parseInt("0"); private static serverILFactory = Modified: trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config 2010-07-20 22:28:30 UTC (rev 3255) +++ trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config 2010-07-20 22:31:05 UTC (rev 3256) @@ -14,7 +14,9 @@ com.sun.jini.start { private static appHome = System.getProperty("app.home", ConfigMath.getAbsolutePath(new File(".")) ); - private static codebaseHost = NicUtil.getIpAddress("eth0", 0, null, true); + private static codebaseHost = + NicUtil.getDefaultNic + ( NicUtil.getDefaultIpv4Address(true) ); private static codebasePort = "23334"; private static defaultCodebaseRootDir = ConfigUtil.concat( new String[] { appHome, "${/}bigdata-jini${/}lib${/}jini${/}lib-dl" } ); Modified: trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config 2010-07-20 22:28:30 UTC (rev 3255) +++ trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config 2010-07-20 22:31:05 UTC (rev 3256) @@ -33,7 +33,9 @@ private static appHome = System.getProperty("app.home", ConfigMath.getAbsolutePath(new File(".")) ); - private static localIpAddr = NicUtil.getIpAddress("eth0", 0, null, true); + private static localIpAddr = + NicUtil.getDefaultNic + ( NicUtil.getDefaultIpv4Address(true) ); /* A comma delimited list of the known zookeeper servers together * with their assigned "myid". 
Modified: trunk/build.xml =================================================================== --- trunk/build.xml 2010-07-20 22:28:30 UTC (rev 3255) +++ trunk/build.xml 2010-07-20 22:31:05 UTC (rev 3256) @@ -1345,7 +1345,7 @@ <mkdir dir="${bigdata-test.lib}"/> <property name="bigdata-test.jar" location="${bigdata-test.lib}/bigdata-test.jar" /> - <property name="javac.test.classpath" value="${classes.dir}:${junit.jar}:${cweb-junit-ext.jar}:${sesame-sparql-test.jar}:${sesame-store-test.jar}:${dist.lib}/classserver.jar:${dist.lib}/ctc_utils.jar:${dist.lib}/cweb-commons.jar:${dist.lib}/cweb-extser.jar:${dist.lib}/highscalelib.jar:${dist.lib}/dsiutils.jar:${dist.lib}/lgplutils.jar:${dist.lib}/fastutil.jar:${dist.lib}/icu4j.jar:${dist.lib}/iris.jar:${dist.lib}/log4j.jar:${dist.lib}/openrdf-sesame.jar:${dist.lib}/slf4j.jar:${dist.lib}/jsk-lib.jar:${dist.lib}/jsk-platform.jar:${dist.lib}/nxparser.jar:${dist.lib}/zookeeper.jar"/> + <property name="javac.test.classpath" value="${classes.dir}${path.separator}${junit.jar}${path.separator}${cweb-junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${dist.lib}/classserver.jar${path.separator}${dist.lib}/ctc_utils.jar${path.separator}${dist.lib}/cweb-commons.jar${path.separator}${dist.lib}/cweb-extser.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/iris.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar"/> <echo>javac </echo> @@ -1533,7 +1533,7 @@ </target> <target name="startLookup"> - <echo>java -Dapp.home=${app.home} -Djini.lib=${dist.lib} -Djini.lib.dl=${dist.lib.dl} -Djava.security.policy=${java.security.policy} -Djava.security.debug=off -Djava.protocol.handler.pkgs=net.jini.url -Dlog4j.configuration=${log4j.configuration} -Dcodebase.port=${test.codebase.port} -Djava.net.preferIPv4Stack=${java.net.preferIPv4Stack} -Dbigdata.fedname=${bigdata.fedname} -jar ${bigdata-test.lib}/lookupstarter.jar + <echo>java -Dapp.home=${app.home} -Djini.lib=${dist.lib} -Djini.lib.dl=${dist.lib.dl} -Djava.security.policy=${java.security.policy} -Djava.security.debug=off -Djava.protocol.handler.pkgs=net.jini.url -Dlog4j.configuration=${log4j.configuration} -Dcodebase.port=${test.codebase.port} -Djava.net.preferIPv4Stack=${java.net.preferIPv4Stack} -Dbigdata.fedname=${bigdata.fedname} -Ddefault.nic=${default.nic} -jar ${bigdata-test.lib}/lookupstarter.jar </echo> <echo> </echo> @@ -1551,11 +1551,12 @@ <sysproperty key="codebase.port" value="${test.codebase.port}"/> <sysproperty key="java.net.preferIPv4Stack" value="${java.net.preferIPv4Stack}"/> <sysproperty key="bigdata.fedname" value="${bigdata.fedname}"/> + <sysproperty key="default.nic" value="${default.nic}"/> </java> </target> <target name="stopLookup"> - <echo>java -Dapp.home=${app.home} -Djini.lib=${dist.lib} -Djini.lib.dl=${dist.lib.dl} -Djava.security.policy=${java.security.policy} -Dlog4j.configuration=${log4j.configuration} -Djava.net.preferIPv4Stack=${java.net.preferIPv4Stack} -Dbigdata.fedname=${bigdata.fedname} -jar ${bigdata-test.lib}/lookupstarter.jar -stop + <echo>java -Dapp.home=${app.home} -Djini.lib=${dist.lib} 
-Djini.lib.dl=${dist.lib.dl} -Djava.security.policy=${java.security.policy} -Dlog4j.configuration=${log4j.configuration} -Djava.net.preferIPv4Stack=${java.net.preferIPv4Stack} -Dbigdata.fedname=${bigdata.fedname} -Ddefault.nic=${default.nic} -jar ${bigdata-test.lib}/lookupstarter.jar -stop </echo> <echo> </echo> @@ -1569,6 +1570,7 @@ <sysproperty key="log4j.configuration" value="${log4j.configuration}"/> <sysproperty key="java.net.preferIPv4Stack" value="${java.net.preferIPv4Stack}"/> <sysproperty key="bigdata.fedname" value="${bigdata.fedname}"/> + <sysproperty key="default.nic" value="${default.nic}"/> <arg value="-stop" /> </java> </target> @@ -1607,7 +1609,7 @@ <pathelement location="${dist.lib}/zookeeper.jar"/> </path> - <property name="run.class.path" value="${junit.jar}:${bigdata-test.jar}:${cweb-junit-ext.jar}:${sesame-sparql-test.jar}:${sesame-store-test.jar}:${dist.lib}/bigdata.jar:${dist.lib}/colt.jar:${dist.lib}/cweb-commons.jar:${dist.lib}/cweb-extser.jar:${dist.lib}/ctc_utils.jar:${dist.lib}/highscalelib.jar:${dist.lib}/dsiutils.jar:${dist.lib}/lgplutils.jar:${dist.lib}/fastutil.jar:${dist.lib}/icu4j.jar:${dist.lib}/iris.jar:${dist.lib}/jsk-lib.jar:${dist.lib}/jsk-platform.jar:${dist.lib}/log4j.jar:${dist.lib}/lucene-analyzer.jar:${dist.lib}/lucene-core.jar:${dist.lib}/openrdf-sesame.jar:${dist.lib}/slf4j.jar:${dist.lib}/slf4j-log4j.jar:${dist.lib}/nxparser.jar:${dist.lib}/zookeeper.jar"/> + <property name="run.class.path" value="${junit.jar}${path.separator}${bigdata-test.jar}${path.separator}${cweb-junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${dist.lib}/bigdata.jar${path.separator}${dist.lib}/colt.jar${path.separator}${dist.lib}/cweb-commons.jar${path.separator}${dist.lib}/cweb-extser.jar${path.separator}${dist.lib}/ctc_utils.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/iris.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/slf4j-log4j.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar"/> <echo> classpath: ${run.class.path} </echo> @@ -1806,7 +1808,7 @@ <pathelement location="${dist.lib}/nxparser.jar"/> <pathelement location="${dist.lib}/zookeeper.jar"/> </path> - <property name="run.class.path" value="${junit.jar}:${bigdata-test.jar}:${cweb-junit-ext.jar}:${sesame-sparql-test.jar}:${sesame-store-test.jar}:${dist.lib}/bigdata.jar:${dist.lib}/colt.jar:${dist.lib}/cweb-commons.jar:${dist.lib}/cweb-extser.jar:${dist.lib}/ctc_utils.jar:${dist.lib}/highscalelib.jar:${dist.lib}/dsiutils.jar:${dist.lib}/lgplutils.jar:${dist.lib}/fastutil.jar:${dist.lib}/icu4j.jar:${dist.lib}/iris.jar:${dist.lib}/jsk-lib.jar:${dist.lib}/jsk-platform.jar:${dist.lib}/log4j.jar:${dist.lib}/lucene-analyzer.jar:${dist.lib}/lucene-core.jar:${dist.lib}/openrdf-sesame.jar:${dist.lib}/slf4j.jar:${dist.lib}/slf4j-log4j.jar:${dist.lib}/nxparser.jar:${dist.lib}/zookeeper.jar"/> + <property name="run.class.path" 
value="${junit.jar}${path.separator}${bigdata-test.jar}${path.separator}${cweb-junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${dist.lib}/bigdata.jar${path.separator}${dist.lib}/colt.jar${path.separator}${dist.lib}/cweb-commons.jar${path.separator}${dist.lib}/cweb-extser.jar${path.separator}${dist.lib}/ctc_utils.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/iris.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/slf4j-log4j.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar"/> <!-- Generate the LUBM dataset. <mkdir dir="${data}"/> <java classname="edu.lehigh.swat.bench.uba.Generator" dir="${data}" fork="yes"> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <btm...@us...> - 2010-07-22 14:37:12
Revision: 3265 http://bigdata.svn.sourceforge.net/bigdata/?rev=3265&view=rev Author: btmurphy Date: 2010-07-22 14:37:05 +0000 (Thu, 22 Jul 2010) Log Message: ----------- [trunk]: additional changes to NicUtil to address trac #108 as well as new targets added to build.xml to allow one to use bigdataCluster.config to run a federation on a single node using the current ServicesManagerService & ant install mechanism Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config trunk/build.properties trunk/build.xml trunk/src/resources/config/bigdataCluster.config trunk/src/resources/config/jini/reggie.config trunk/src/resources/config/jini/startAll.config trunk/src/resources/config/logging.properties Modified: trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java 2010-07-22 14:37:05 UTC (rev 3265) @@ -505,6 +505,141 @@ } /** + * Special-purpose convenience method that returns a + * <code>String</code> value representing the ip address of + * the current node; where the value that is returned is + * determined according to following criteria: + * <p> + * <ul> + * <li> If a non-<code>null</code> value is input for the + * <code>systemPropertyName</code> parameter, then + * this is viewed as a declaration by the caller that + * that the system property with that given value + * should take precedence over all other means of + * determining the desired ip address. As such, this + * method determines if a system property having the + * given has indeed been set and, if it has, returns + * the ip address of the nic having that name; or + * <code>null</code> if there is no nic with the + * desired name installed on the node. + * <li> If a non-<code>null</code> value is input for the + * <code>systemPropertyName</code> parameter, but + * no system property with that name has been set, + * and <code>true</code> has been passed in for + * the <code>fallbackOk</code> parameter, then this + * method will return the IPV4 based address of the + * first reachable nic that is found on the node. + * Upon failing to find such an address, if the + * <code>loopbackOk</code> parameter is also + * <code>true</code>, then this method will return + * the <i>loop back</i> address of the node; otherwise + * <code>null</code> is returned. + * <li> If <code>null</code> is input for the + * <code>systemPropertyName</code> parameter, but + * a non-<code>null</code> value is input for the + * <code>defaultNic</code> parameter, then this + * method returns the ip address of the nic having + * that name; or <code>null</code> if there is no + * nic with the desired default name installed on the + * node. 
+ * <li> If <code>null</code> is input for both the + * <code>systemPropertyName</code> parameter and the + * <code>defaultNic</code> parameter, and if the + * <code>fallbackOk</code> parameter is <code>true</code>, + * then this method will return the IPV4 based address + * of the first reachable nic that is found on the node. + * Upon failing to find such an address, if the + * <code>loopbackOk</code> parameter is also + * <code>true</code>, then this method will return + * the <i>loop back</i> address of the node; otherwise + * <code>null</code> is returned. + * </ul> + * <p> + * This method can be called from within a configuration + * as well as from within program control. + * + * @param systemPropertyName <code>String</code> value containing + * the name of a system property whose + * value is the network interface name + * whose ip address should be returned. + * May be <code>null</code>. + * + * @param defaultNic <code>String</code> value containing + * the name of the network interface + * whose ip address should be returned + * if <code>null</code> is input for the + * <code>systemPropertyName</code> parameter. + * + * @param fallbackOk if <code>true</code>, then if either + * no system property is set having the + * name referenced by the + * <code>systemPropertyName</code> parameter, + * or if <code>null</code> is input for both + * the <code>systemPropertyName</code> + * parameter and the <code>defaultNic</code> + * parameter, return the IPV4 based address + * of the first reachable network interface + * that can be found on the node. + * + * @param loopbackOk if <code>true</code>, and if <code>true</code> + * is also input for the <code>fallbackOk</code> + * parameter, then if this method attempts, + * but fails, to find a valid IPV4 fallback + * address, then the node's <i>loop back</i> + * address is returned. + * + * @return a <code>String</code> representing an ip address associated + * with the current node; where the value that is returned is + * determined according to the criteria described above. 
+ */ + public static String getIpAddress(String systemPropertyName, + String defaultNic, + boolean fallbackOk, + boolean loopbackOk) + throws SocketException, IOException + { + if(systemPropertyName != null) {//system property takes precedence + String nicName = System.getProperty(systemPropertyName); + boolean propSet = true; + if(nicName == null) { + propSet = false; + } else { + // handle ant script case where the system property + // may not have been set on the command line, but + // was still set to "${<systemPropertyName>}" using + // ant <sysproperty> tag + String rawProp = "${" + systemPropertyName + "}"; + if( rawProp.equals(nicName) ) propSet = false; + } + if(propSet) { + return getIpAddress(nicName, 0, loopbackOk); + } else {//desired system property not set, try fallback + if(fallbackOk) { + return getDefaultIpv4Address(loopbackOk); + } else { + return null; + } + } + } else {//no system property name provided, try default nic + if(defaultNic != null) { + return getIpAddress(defaultNic, 0, loopbackOk); + } else {//no default nic provided, try fallback + if(fallbackOk) { + return getDefaultIpv4Address(loopbackOk); + } else { + return null; + } + } + } + } + + public static String getIpAddress() + throws SocketException, IOException + { + return getIpAddress(null, null, true, true); + } + + /** * Examines each address associated with each network interface * card (nic) installed on the current node, and returns the * <code>String</code> value of the first such address that is @@ -617,73 +752,6 @@ } /** - * Special-purpose convenience method that will return the - * value of the system property named <code>default.nic</code> - * if that property has been set on the current VM to any - * non-<code>null</code> value other than the value, - * <code>${default.nic}</code>; otherwise returns the value - * input for the <code>defaultVal</code> parameter. - * <p> - * This method can be called from within a configuration - * as well as from within program control. It is provided - * as a way to allow mechanisms that are not able to - * conditionally set system properties to always set the - * system property named <code>default.nic</code> to some - * value -- either valid or invalid -- depending on whether - * that property is set as a system property on the command - * line. - * <p> - * For example, the <code>java</code> ant task used to exec - * java based programs does not allow one to set a system - * property on that program's VM when that property is set - * on the command line used to execute ant, but leave that - * property un-set when ant's command line does not specify - * a value for that property. That is, although ant allows - * one to retrieve the value of a system property that is - * specified on ant's command line and substitute that value - * (using Java's '${}' token-substitution mechanism) into - * a system property set on the program's VM, ant does not - * allow one to leave that property un-set when the system - * property is not specified on ant's command line; the - * property must either always be set on the VM, or never - * set. If the ant script then is written to always set the - * system property on the exec'd program's VM to the - * value of the system property expected on ant's command - * line, then whenever that system property is not set on - * ant's command line, the value that is substituted into - * the system property when the program is exec-ed will be - * of the form, <code>${default.nic}</code>. 
If the program - * that is exec'd then attempts to call - * <code>System.getProperty("default.nic")</code>, that - * call will return the non-<code>null</code> value, - * <code>${default.nic}</code>; which is typically not - * what is desired. - * - * @param defaultVal <code>String</code> value that this method - * returns if the system property named - * <code>default.nic</code> is either not - * set or is equal to the value - * <code>${default.nic}</code>. - * - * @return a <code>String</code> value equal to the value of - * the system property named <code>${default.nic}</code> - * if that system property is set to any value except - * <code>null</code> or the value <code>${default.nic}</code>; - * otherwise, returns the value input for the - * <code>defaultVal</code> parameter. - */ - public static String getDefaultNic(String defaultVal) { - String defaultNic = System.getProperty("default.nic"); - if( ("${default.nic}").equals(defaultNic) ) defaultNic = null; - if(defaultNic == null) defaultNic = defaultVal; - return defaultNic; - } - - public static String getDefaultNic() { - return getDefaultNic(null); - } - - /** * Intended for use by scripts. */ public static void main(String[] args) { Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java 2010-07-22 14:37:05 UTC (rev 3265) @@ -74,10 +74,8 @@ private static String defaultGroup = null; static { try { - thisHost = - NicUtil.getIpAddress - ( NicUtil.getDefaultNic - ( NicUtil.getDefaultIpv4Address(true) ) ); + thisHost = NicUtil.getIpAddress + ("default.nic", null, true, true); defaultGroup = System.getProperty("bigdata.fedname", "bigdata.test.group-"+thisHost); Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config 2010-07-22 14:37:05 UTC (rev 3265) @@ -10,8 +10,7 @@ com.sun.jini.reggie { private static exportPort = 0;//randomly chosen port private static exportIpAddr = - NicUtil.getDefaultNic - ( NicUtil.getDefaultIpv4Address(true) ); + NicUtil.getIpAddress("default.nic", null, true, true); private static codebasePort = 23333;//value used if not set by system prop // Public configuration entries Modified: trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config =================================================================== --- trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config 2010-07-22 14:37:05 UTC (rev 3265) @@ -77,8 +77,7 @@ static serviceDir = ConfigMath.getAbsoluteFile(new File(fedname)); private static localIpAddr = - NicUtil.getDefaultNic - ( NicUtil.getDefaultIpv4Address(true) ); + NicUtil.getIpAddress("default.nic", null, true, true); /* * Example cluster configuration. 
Modified: trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config 2010-07-22 14:37:05 UTC (rev 3265) @@ -15,8 +15,7 @@ com.sun.jini.reggie { private static exportIpAddr = - NicUtil.getDefaultNic - ( NicUtil.getDefaultIpv4Address(true) ); + NicUtil.getIpAddress("default.nic", null, true, true); private static exportPort = Integer.parseInt("0"); private static serverILFactory = Modified: trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config 2010-07-22 14:37:05 UTC (rev 3265) @@ -15,8 +15,7 @@ private static appHome = System.getProperty("app.home", ConfigMath.getAbsolutePath(new File(".")) ); private static codebaseHost = - NicUtil.getDefaultNic - ( NicUtil.getDefaultIpv4Address(true) ); + NicUtil.getIpAddress("default.nic", null, true, true); private static codebasePort = "23334"; private static defaultCodebaseRootDir = ConfigUtil.concat( new String[] { appHome, "${/}bigdata-jini${/}lib${/}jini${/}lib-dl" } ); Modified: trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config 2010-07-22 14:37:05 UTC (rev 3265) @@ -34,8 +34,7 @@ private static appHome = System.getProperty("app.home", ConfigMath.getAbsolutePath(new File(".")) ); private static localIpAddr = - NicUtil.getDefaultNic - ( NicUtil.getDefaultIpv4Address(true) ); + NicUtil.getIpAddress("default.nic", null, true, true); /* A comma delimited list of the known zookeeper servers together * with their assigned "myid". Modified: trunk/build.properties =================================================================== --- trunk/build.properties 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/build.properties 2010-07-22 14:37:05 UTC (rev 3265) @@ -131,6 +131,9 @@ # Where to write the log files. install.log.dir=${NAS}/log +# Where to write the deployment distribution. +install.dist.dir=${NAS}/dist + # The 'install-as' user (defaults to the user running the installer). # # @TODO ant lacks sufficient mechanisms to set the user (chown). Modified: trunk/build.xml =================================================================== --- trunk/build.xml 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/build.xml 2010-07-22 14:37:05 UTC (rev 3265) @@ -493,6 +493,7 @@ <mkdir dir="${install.lib.dir}" /> <mkdir dir="${install.bin.dir}" /> <mkdir dir="${install.log.dir}" /> +<mkdir dir="${install.dist.dir}" /> <!-- install configuration files. --> <copy toDir="${install.config.dir}"> <fileset dir="${bigdata.dir}/src/resources/config"> @@ -1064,10 +1065,9 @@ </java> </target> - <!-- --> - <!-- UNIT TESTS ONLY BELOW HERE --> - <!-- --> - + <!-- --> + <!-- STAGING --> + <!-- --> <target name="stage" description="stages resources (jar, config, policy, logging files) needed to package or execute the bigdata distribution." 
depends="jar"> @@ -1085,7 +1085,6 @@ <property name="dist.var.config" location="${dist.var}/config" /> <property name="dist.var.config.policy" location="${dist.var.config}/policy" /> <property name="dist.var.config.logging" location="${dist.var.config}/logging" /> - <property name="dist.var.config.zookeeper" location="${dist.var.config}/zookeeper" /> <property name="dist.var.config.jini" location="${dist.var.config}/jini" /> <delete dir="${dist.dir}" quiet="true"/> @@ -1098,7 +1097,6 @@ <mkdir dir="${dist.var.config}"/> <mkdir dir="${dist.var.config.policy}"/> <mkdir dir="${dist.var.config.logging}"/> - <mkdir dir="${dist.var.config.zookeeper}"/> <mkdir dir="${dist.var.config.jini}"/> <!-- Copy build.properties to the top-level config file --> @@ -1168,10 +1166,6 @@ <!-- Zookeeper library --> <copy file="${bigdata-zookeeper.lib}/zookeeper-3.2.1.jar" tofile="${dist.lib}/zookeeper.jar"/> -<!-- - <copy file="/home/brmurphy/zookeeper/cdd99c5/build/zookeeper-3.3.0.jar" - tofile="${dist.lib}/zookeeper.jar"/> ---> <!-- Jini library --> @@ -1231,8 +1225,6 @@ todir="${dist.bin.config}"/> <copy file="${src.resources}/bin/config/serviceStarter.config" todir="${dist.bin.config}"/> - <copy file="${src.resources}/bin/config/zookeeper.config" - todir="${dist.bin.config}"/> <!-- Stage security policy (config) files --> <copy file="${src.resources.config}/policy.all" @@ -1285,6 +1277,329 @@ todir="${dist.var.config.jini}"/> </target> + <!-- --> + <!-- RELEASE --> + <!-- --> + <target name="deploy-artifact" depends="clean, stage" + description="Create compressed tar file for deployment."> + <tar destfile="${bigdata.dir}/REL.${version}.tgz" + compression="gzip"> + + <tarfileset dir="${bigdata.dir}/dist"> + <include name="bigdata/**" /> + + <exclude name="bigdata/bin/pstart" /> + + </tarfileset> + + <!-- Add scripts separately, making them executable --> + + <tarfileset dir="${bigdata.dir}/dist" filemode="755"> + <include name="bigdata/bin/pstart" /> + + </tarfileset> + </tar> + </target> + + <target name="ant-install-prepare" depends="jar, bundle" + description="Stage all files (src, lib, config, etc.) 
needed for ant based install."> + <copy toDir="${build.dir}/bigdata/src"> + <fileset dir="${bigdata.dir}/bigdata/src" /> + </copy> + <copy toDir="${build.dir}/bigdata-jini/src"> + <fileset dir="${bigdata.dir}/bigdata-jini/src" /> + </copy> + <copy toDir="${build.dir}/bigdata-rdf/src"> + <fileset dir="${bigdata.dir}/bigdata-rdf/src" /> + </copy> + <copy toDir="${build.dir}/bigdata-sails/src"> + <fileset dir="${bigdata.dir}/bigdata-sails/src" /> + </copy> + <copy toDir="${build.dir}/lgpl-utils/src"> + <fileset dir="${bigdata.dir}/lgpl-utils/src" /> + </copy> + + <mkdir dir="${build.dir}/bigdata/lib" /> + <copy toDir="${build.dir}/bigdata/lib"> + <fileset dir="${bigdata.dir}/bigdata/lib" /> + </copy> + + <mkdir dir="${build.dir}/bigdata-jini/lib" /> + <copy toDir="${build.dir}/bigdata-jini/lib"> + <fileset dir="${bigdata.dir}/bigdata-jini/lib" /> + </copy> + <mkdir dir="${build.dir}/bigdata-rdf/lib" /> + <copy toDir="${build.dir}/bigdata-rdf/lib"> + <fileset dir="${bigdata.dir}/bigdata-rdf/lib" /> + </copy> + + <mkdir dir="${build.dir}/bigdata-sails/lib" /> + <copy toDir="${build.dir}/bigdata-sails/lib"> + <fileset dir="${bigdata.dir}/bigdata-sails/lib" /> + </copy> + + <mkdir dir="${build.dir}/src" /> + <mkdir dir="${build.dir}/src/resources" /> + <mkdir dir="${build.dir}/src/resources/config" /> + <copy toDir="${build.dir}/src/resources/config"> + <fileset dir="${bigdata.dir}/src/resources/config" /> + </copy> + + <mkdir dir="${build.dir}/src/resources/scripts" /> + <copy toDir="${build.dir}/src/resources/scripts"> + <fileset dir="${bigdata.dir}/src/resources/scripts" /> + </copy> + + <copy tofile="${build.dir}/build.properties" file="build.properties" /> + <copy tofile="${build.dir}/build.xml" file="build.xml" /> + <copy tofile="${build.dir}/LICENSE.txt" file="LICENSE.txt" /> + <copy tofile="${build.dir}/overview.html" file="overview.html" /> + <copy tofile="${build.dir}/README-JINI" file="README-JINI" /> + <copy toDir="${build.dir}/LEGAL" flatten="true"> + <fileset dir="${bigdata.dir}"> + <include name="**/LEGAL/*" /> + </fileset> + </copy> + </target> + + <target name="ant-install-artifact" depends="clean, ant-install-prepare, stage" + description="Create complete source tar file for ant based install."> + <mkdir dir="${release.dir}" /> + <tar destfile="${bigdata.dir}/DIST.${version}.tgz" compression="gzip"> + <tarfileset dir="${build.dir}" prefix="${version}"> + <include name="build.properties" /> + <include name="build.xml" /> + <include name="LICENSE.txt" /> + <include name="overview.html" /> + <include name="README-JINI" /> + <include name="LEGAL/*" /> + + <include name="bigdata/src/**" /> + <include name="bigdata-jini/src/**" /> + <include name="bigdata-rdf/src/**" /> + <include name="bigdata-sails/src/**" /> + <include name="lgpl-utils/src/**" /> + <include name="bigdata/lib/**" /> + <include name="bigdata-jini/lib/**" /> + <include name="bigdata-rdf/lib/**" /> + <include name="bigdata-sails/lib/**" /> + <include name="src/**" /> + <exclude name="classes/**" /> + <exclude name="${version}.jar" /> + <exclude name="lib/**" /> + <exclude name="docs/**" /> + + <exclude name="dist/bigdata/**" /> + + </tarfileset> + + <!-- Add dist files separately, minus scripts --> + + <tarfileset dir="${bigdata.dir}" prefix="${version}"> + <include name="dist/bigdata/**" /> + + <exclude name="dist/bigdata/bin/pstart" /> + </tarfileset> + + <!-- Add dist scripts separately, making them executable --> + + <tarfileset dir="${bigdata.dir}" prefix="${version}" filemode="755"> + <include 
name="dist/bigdata/bin/pstart" /> + </tarfileset> + </tar> + + <tar destfile="${bigdata.dir}/REL.${version}.tgz" + basedir="${bigdata.dir}/dist" + compression="gzip"> + </tar> + </target> + + <target name="ant-install" depends="jar, banner, bundle" description="Ant based install on a node."> + <mkdir dir="${NAS}" /> + <mkdir dir="${LAS}" /> + <chmod perm="ug+rw,o-rw"> + <fileset dir="${NAS}" /> + </chmod> + <chmod perm="ug+rw,o-rw"> + <fileset dir="${LAS}" /> + </chmod> + <mkdir dir="${install.config.dir}" /> + <mkdir dir="${install.doc.dir}" /> + <mkdir dir="${install.lib.dir}" /> + <mkdir dir="${install.bin.dir}" /> + <mkdir dir="${install.log.dir}" /> + <mkdir dir="${install.dist.dir}" /> + <copy toDir="${install.config.dir}"> + <fileset dir="${bigdata.dir}/src/resources/config" /> + </copy> + <copy toDir="${install.doc.dir}"> + <fileset dir="${bigdata.dir}"> + <include name="LICENSE.txt" /> + <include name="overview.html" /> + <include name="README-JINI" /> + <include name="bigdata/LEGAL/*" /> + <include name="bigdata-jini/LEGAL/*" /> + <include name="bigdata-rdf/LEGAL/*" /> + <include name="bigdata-sails/LEGAL/*" /> + </fileset> + </copy> + + <copy toDir="${install.lib.dir}"> + <fileset dir="${build.dir}/lib" /> + <fileset file="${build.dir}/${version}.jar" /> + </copy> + + <copy toDir="${install.bin.dir}"> + <fileset dir="src/resources/scripts" /> + </copy> + + <copy toDir="${install.dist.dir}"> + <fileset dir="${bigdata.dir}/dist"> + <include name="bigdata/**" /> + </fileset> + </copy> + + <!-- parameter substitution. --> + <property name="myclasspath" refid="install.classpath" /> + <replace dir="${install.bin.dir}" summary="true"> + <replacefilter token="@FED@" value="${FED}" /> + <replacefilter token="@NAS@" value="${NAS}" /> + <replacefilter token="@LAS@" value="${LAS}" /> + <replacefilter token="@JAVA_HOME@" value="${JAVA_HOME}" /> + <replacefilter token="@JINI_CLASS_SERVER_PORT@" value="${JINI_CLASS_SERVER_PORT}" /> + <replacefilter token="@LOAD_BALANCER_PORT@" value="${LOAD_BALANCER_PORT}" /> + <replacefilter token="@SYSSTAT_HOME@" value="${SYSSTAT_HOME}" /> + <replacefilter token="@USE_NIO@" value="${USE_NIO}" /> + <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> + <replacefilter token="@LIB_DIR@" value="${install.lib.dir}" /> + <replacefilter token="@LOG_DIR@" value="${install.log.dir}" /> + <replacefilter token="@CONFIG_DIR@" value="${install.config.dir}" /> + <replacefilter token="@INSTALL_USER@" value="${install.user}" /> + <replacefilter token="@INSTALL_GROUP@" value="${install.group}" /> + <replacefilter token="@LOCK_FILE@" value="${LOCK_FILE}" /> + <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> + <replacefilter token="@JINI_CONFIG@" value="${jini.config}" /> + <replacefilter token="@POLICY_FILE@" value="${policyFile}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_PORT@" value="${LOG4J_SOCKET_LOGGER_PORT}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_CONFIG@" value="${log4jServer.config}" /> + <replacefilter token="@LOG4J_DATE_PATTERN@" value="${LOG4J_DATE_PATTERN}" /> + <replacefilter token="@LOG4J_CONFIG@" value="${log4j.config}" /> + <replacefilter token="@LOGGING_CONFIG@" value="${logging.config}" /> + <replacefilter token="@ERROR_LOG@" value="${errorLog}" /> + <replacefilter token="@DETAIL_LOG@" value="${detailLog}" /> + <replacefilter token="@EVENT_LOG@" value="${eventLog}" /> + <replacefilter token="@RULE_LOG@" value="${ruleLog}" /> 
+ <replacefilter token="@STATE_LOG@" value="${stateLog}" /> + <replacefilter token="@STATE_FILE@" value="${stateFile}" /> + <replacefilter token="@FORCE_KILL_ALL@" value="${forceKillAll}" /> + <replacefilter token="@NTP_MASTER@" value="${NTP_MASTER}" /> + <replacefilter token="@NTP_NETWORK@" value="${NTP_NETWORK}" /> + <replacefilter token="@NTP_NETMASK@" value="${NTP_NETMASK}" /> + <replacefilter token="@CLASSPATH@" value="${myclasspath}" /> + </replace> + + <replace dir="${install.config.dir}" summary="true"> + <replacefilter token="@FED@" value="${FED}" /> + <replacefilter token="@NAS@" value="${NAS}" /> + <replacefilter token="@LAS@" value="${LAS}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> + <replacefilter token="@JAVA_HOME@" value="${JAVA_HOME}" /> + <replacefilter token="@JINI_CLASS_SERVER_PORT@" value="${JINI_CLASS_SERVER_PORT}" /> + <replacefilter token="@LOAD_BALANCER_PORT@" value="${LOAD_BALANCER_PORT}" /> + <replacefilter token="@SYSSTAT_HOME@" value="${SYSSTAT_HOME}" /> + <replacefilter token="@USE_NIO@" value="${USE_NIO}" /> + <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> + <replacefilter token="@LIB_DIR@" value="${install.lib.dir}" /> + <replacefilter token="@LOG_DIR@" value="${install.log.dir}" /> + <replacefilter token="@CONFIG_DIR@" value="${install.config.dir}" /> + <replacefilter token="@INSTALL_USER@" value="${install.user}" /> + <replacefilter token="@INSTALL_GROUP@" value="${install.group}" /> + <replacefilter token="@LOCK_FILE@" value="${LOCK_FILE}" /> + <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> + <replacefilter token="@JINI_CONFIG@" value="${jini.config}" /> + <replacefilter token="@POLICY_FILE@" value="${policyFile}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_PORT@" value="${LOG4J_SOCKET_LOGGER_PORT}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_CONFIG@" value="${log4jServer.config}" /> + <replacefilter token="@LOG4J_DATE_PATTERN@" value="${LOG4J_DATE_PATTERN}" /> + <replacefilter token="@LOG4J_CONFIG@" value="${log4j.config}" /> + <replacefilter token="@LOGGING_CONFIG@" value="${logging.config}" /> + <replacefilter token="@ERROR_LOG@" value="${errorLog}" /> + <replacefilter token="@DETAIL_LOG@" value="${detailLog}" /> + <replacefilter token="@EVENT_LOG@" value="${eventLog}" /> + <replacefilter token="@RULE_LOG@" value="${ruleLog}" /> + <replacefilter token="@STATE_LOG@" value="${stateLog}" /> + <replacefilter token="@STATE_FILE@" value="${stateFile}" /> + <replacefilter token="@FORCE_KILL_ALL@" value="${forceKillAll}" /> + <replacefilter token="@NTP_MASTER@" value="${NTP_MASTER}" /> + <replacefilter token="@NTP_NETWORK@" value="${NTP_NETWORK}" /> + <replacefilter token="@NTP_NETMASK@" value="${NTP_NETMASK}" /> + <replacefilter token="@CLASSPATH@" value="${myclasspath}" /> + <!-- updates the configuration file to locate the lubm ontology. --> + <replacefilter token="@install.lubm.config.dir@" value="${install.lubm.config.dir}" /> + </replace> + + <!-- fix newlines (otherwise substitutions cause things to break). --> + <fixcrlf srcDir="${install.config.dir}" /> + <fixcrlf srcDir="${install.bin.dir}" /> + + <!-- set execute bit for scripts in this directory (must be the last step). 
--> + <chmod perm="u+x,g+rx,o-rwx"> + <fileset dir="${install.bin.dir}"> + <exclude name="README"/> + <exclude name="POST-INSTALL"/> + </fileset> + </chmod> + + <!-- Setup the status file which will be read by the bigdata script and + the log on which that script will write its output. This is used + if cron, or a similar process, will execute the script on a periodic + basis. The initial state is always 'status'. The initial stateLog + is always empty. The state file must be readable by the group, but + could be restricted to write by a specific user. The stateLog must be + read/write for the group. --> + +<echo file="${stateFile}">status</echo> +<echo file="${stateLog}"> +</echo> + + <chmod perm="g+rw,o-rw" file="${stateFile}" /> + <chmod perm="g+rw,o-rw" file="${stateLog}" /> + + <!-- Make sure that the entire shared directory structure is read/write for the group. --> + <chmod perm="g+rwx" type="both" dir="${NAS}" verbose="true"/> + +<!-- Make sure that it is all accessible to the install group (ant 1.6+ plus extension module required). + <chown file="${NAS}" type="both" owner="${install.user}.${install.group}" verbose="true"/> +--> + + <!-- Works for earlier versions of ant LT 1.6 which do not bundle "chown". --> + <apply executable="chown" description="set owner on NAS files" os="Linux"> + <arg value="-R"/> + <arg value="${install.user}.${install.group}"/> + <dirset dir="${NAS}"/> + </apply> + + <!-- @todo check the installed configuration file (after parameter substitution). --> + <!-- @todo also check the installed jini configuration files. --> + <java classname="com.bigdata.jini.util.CheckConfiguration" + failonerror="true" fork="true" logerror="true"> + <classpath refid="install.classpath" /> + <arg value="${bigdata.config}" /> + </java> + + <loadfile property="postInstallMessage" srcFile="${install.bin.dir}/POST-INSTALL" /> + +<echo> + +${postInstallMessage}</echo> + + </target> + + <!-- --> + <!-- UNIT TESTS --> + <!-- --> <target name="testCompile" description="compiles the test source and generates the appropriate jar files." 
depends="stage"> Modified: trunk/src/resources/config/bigdataCluster.config =================================================================== --- trunk/src/resources/config/bigdataCluster.config 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/src/resources/config/bigdataCluster.config 2010-07-22 14:37:05 UTC (rev 3265) @@ -378,6 +378,9 @@ "-Xmx400m", "-Djava.security.policy="+bigdata.policy, "-Djava.util.logging.config.file="+bigdata.logging, + "-Dlog4j.configuration="+bigdata.log4j, + "-Dlog4j.primary.configuration="+bigdata.log4j, + "-DinitialMemberGroups="+bigdata.fedname }; Modified: trunk/src/resources/config/jini/reggie.config =================================================================== --- trunk/src/resources/config/jini/reggie.config 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/src/resources/config/jini/reggie.config 2010-07-22 14:37:05 UTC (rev 3265) @@ -1,3 +1,52 @@ + +import java.net.NetworkInterface; + +import com.sun.jini.config.ConfigUtil; +import net.jini.constraint.BasicMethodConstraints; +import net.jini.core.constraint.ConnectionRelativeTime; +import net.jini.core.constraint.InvocationConstraints; +import net.jini.jeri.BasicILFactory; +import net.jini.jeri.BasicJeriExporter; +import net.jini.jeri.tcp.TcpServerEndpoint; +import net.jini.core.discovery.LookupLocator; + +import com.bigdata.util.config.NicUtil; + com.sun.jini.reggie { - initialMemberGroups = new String[] { System.getProperty("user.name") + "InstallVerifyGroup" }; + + private static exportIpAddr = NicUtil.getIpAddress(null, null, true, true); + + private static exportPort = + Integer.parseInt( System.getProperty("exportPort", "0") ); + + private static serverILFactory = + new BasicILFactory( + new BasicMethodConstraints( + new InvocationConstraints( + new ConnectionRelativeTime(10000L), + null)), + null); + + serverExporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(exportIpAddr,exportPort), + serverILFactory, + false, + true); + + initialMemberGroups = new String[] { System.getProperty("initialMemberGroups", System.getProperty("user.name")+"InstallVerifyGroup" ) }; + initialLookupGroups = initialMemberGroups; + initialLookupLocators = new LookupLocator[] { }; + + unicastDiscoveryHost = exportIpAddr; + multicastInterfaces = new NetworkInterface[] { + NicUtil.getNetworkInterface(exportIpAddr) + }; + + minMaxServiceLease = 60000L; } + +net.jini.discovery.LookupDiscovery { + multicastRequestHost = com.sun.jini.reggie.exportIpAddr; + multicastInterfaces = new NetworkInterface[] { + NicUtil.getNetworkInterface(com.sun.jini.reggie.exportIpAddr) + }; +} Modified: trunk/src/resources/config/jini/startAll.config =================================================================== --- trunk/src/resources/config/jini/startAll.config 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/src/resources/config/jini/startAll.config 2010-07-22 14:37:05 UTC (rev 3265) @@ -19,11 +19,6 @@ browser: A visualization tool for exploring Jini(TM) Network Technology communities. reggie: Provides implementations of ServiceRegistrar. - fiddler: Provides the server side of an implementation of the lookup discovery service. - mahalo: Provides implementations of the TransactionManager service. - mercury: Provides implementations of the EventMailbox service. - norm: Provides implementations of LeaseRenewalService. -outrigger: Provides implementation of a JavaSpaces(TM) technology-enabled service. 
*/ com.sun.jini.start { @@ -63,41 +58,6 @@ policyFile, libDir+"reggie.jar", "com.sun.jini.reggie.TransientRegistrarImpl", - new String[] { configDir+"reggie.config" }), - - new NonActivatableServiceDescriptor( - "http://" + host + ":" + port + "/fiddler-dl.jar" + jskdl, - policyFile, - libDir+"fiddler.jar", - "com.sun.jini.fiddler.TransientFiddlerImpl", - new String[] { configDir+"fiddler.config" }), - - new NonActivatableServiceDescriptor( - "http://" + host + ":" + port + "/mahalo-dl.jar" + jskdl, - policyFile, - libDir+"mahalo.jar", - "com.sun.jini.mahalo.TransientMahaloImpl", - new String[] { configDir+"mahalo.config" }), - - new NonActivatableServiceDescriptor( - "http://" + host + ":" + port + "/mercury-dl.jar" + jskdl, - policyFile, - libDir+"mercury.jar", - "com.sun.jini.mercury.TransientMercuryImpl", - new String[] { configDir+"mercury.config" }), - - new NonActivatableServiceDescriptor( - "http://" + host + ":" + port + "/norm-dl.jar" + jskdl, - policyFile, - libDir+"norm.jar", - "com.sun.jini.norm.TransientNormServerImpl", - new String[] { configDir+"norm.config" }), - - new NonActivatableServiceDescriptor( - "http://" + host + ":" + port + "/outrigger-dl.jar" + jskdl, - policyFile, - libDir+"outrigger.jar", - "com.sun.jini.outrigger.TransientOutriggerImpl", - new String[] { configDir+"outrigger.config" }) + new String[] { configDir+"reggie.config" }) }; } Modified: trunk/src/resources/config/logging.properties =================================================================== --- trunk/src/resources/config/logging.properties 2010-07-22 14:29:54 UTC (rev 3264) +++ trunk/src/resources/config/logging.properties 2010-07-22 14:37:05 UTC (rev 3265) @@ -40,7 +40,7 @@ java.util.logging.FileHandler.formatter = java.util.logging.XMLFormatter # Limit the message that are printed on the console to INFO and above. -java.util.logging.ConsoleHandler.level = INFO +java.util.logging.ConsoleHandler.level = FINEST java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter @@ -51,11 +51,18 @@ # For example, set the com.xyz.foo logger to only log SEVERE # messages: -com.xyz.foo.level = SEVERE +# com.xyz.foo.level = SEVERE # This turns off the annoying stack trace when we probe to determine # whether or not jini is already running on a specific host (unicast # discovery). Of course, it could also hide other things so you may # want to comment this out. -net.jini.discovery.LookupLocatorDiscovery = WARN +net.jini.discovery.LookupLocatorDiscovery.level = WARNING + +#com.sun.jini.start.level = FINEST +#com.sun.jini.reggie.level = FINEST +#net.jini.discovery.level = FINEST +#net.jini.config.level = FINEST +#net.jini.lookup.JoinManager.level = FINEST +#net.jini.lookup.ServiceDiscoveryManager.level = FINEST
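Revision 3265 collapses the getDefaultNic/getDefaultIpv4Address composition into a single entry point, NicUtil.getIpAddress(systemPropertyName, defaultNic, fallbackOk, loopbackOk), which the configuration files and LookupStarter now call as NicUtil.getIpAddress("default.nic", null, true, true). The following Java sketch illustrates the selection precedence that the new method's Javadoc describes; it is a sketch under stated assumptions, not the real NicUtil: the class name and the helper firstIpv4OnNic are invented stand-ins, and the fallback path reuses getDefaultIpv4Address from the sketch after the first commit message above.

    import java.io.IOException;
    import java.net.Inet4Address;
    import java.net.InetAddress;
    import java.net.NetworkInterface;
    import java.net.SocketException;
    import java.util.Collections;

    public class IpAddressPrecedenceSketch {

        /**
         * Precedence: (1) a system property naming the interface, where the
         * literal token "${<name>}" left behind by an ant sysproperty counts
         * as "not set"; (2) an explicitly supplied default interface name;
         * (3) if fallbackOk, the first reachable IPv4 address on the node,
         * optionally accepting loopback; otherwise null.
         */
        public static String getIpAddress(String systemPropertyName,
                                          String defaultNic,
                                          boolean fallbackOk,
                                          boolean loopbackOk)
                throws SocketException, IOException {
            if (systemPropertyName != null) {
                String nicName = System.getProperty(systemPropertyName);
                boolean propSet = (nicName != null)
                    && !("${" + systemPropertyName + "}").equals(nicName);
                if (propSet) return firstIpv4OnNic(nicName, loopbackOk);
                return fallbackOk
                    ? DefaultIpv4AddressSketch.getDefaultIpv4Address(loopbackOk)
                    : null;
            }
            if (defaultNic != null) return firstIpv4OnNic(defaultNic, loopbackOk);
            return fallbackOk
                ? DefaultIpv4AddressSketch.getDefaultIpv4Address(loopbackOk)
                : null;
        }

        // Stand-in for NicUtil.getIpAddress(nicName, 0, loopbackOk): the first
        // IPv4 address bound to the named interface, or null if none qualifies.
        private static String firstIpv4OnNic(String nicName, boolean loopbackOk)
                throws SocketException {
            NetworkInterface nic = NetworkInterface.getByName(nicName);
            if (nic == null) return null;
            for (InetAddress addr : Collections.list(nic.getInetAddresses())) {
                if (!(addr instanceof Inet4Address)) continue;
                if (addr.isLoopbackAddress() && !loopbackOk) continue;
                return addr.getHostAddress();
            }
            return null;
        }

        public static void main(String[] args) throws Exception {
            // Mirrors the call now used by LookupStarter and the .config files.
            System.out.println(getIpAddress("default.nic", null, true, true));
        }
    }

Folding the precedence into one method keeps the "${default.nic}" ant workaround in a single place instead of repeating it in every configuration file.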
From: <btm...@us...> - 2010-07-23 22:22:02
Revision: 3283 http://bigdata.svn.sourceforge.net/bigdata/?rev=3283&view=rev Author: btmurphy Date: 2010-07-23 22:21:55 +0000 (Fri, 23 Jul 2010) Log Message: ----------- [trunk]: added non-graphical discovery tool and supporting classes (from dev-btm branch); to aid debugging discovery issues on a headless node where the graphical tools like the browser cannot be run Modified Paths: -------------- trunk/build.xml Added Paths: ----------- trunk/bigdata-jini/src/java/com/bigdata/attr/ trunk/bigdata-jini/src/java/com/bigdata/attr/ServiceInfo.java trunk/bigdata-jini/src/java/com/bigdata/disco/ trunk/bigdata-jini/src/java/com/bigdata/disco/DiscoveryTool.java trunk/bigdata-jini/src/java/com/bigdata/disco/config/ trunk/bigdata-jini/src/java/com/bigdata/disco/config/disco.config trunk/bigdata-jini/src/java/com/bigdata/disco/config/logging.properties trunk/bigdata-jini/src/java/com/bigdata/util/Util.java trunk/bigdata-jini/src/java/com/bigdata/util/config/ trunk/bigdata-jini/src/java/com/bigdata/util/config/ConfigDeployUtil.java trunk/src/resources/bin/disco-tool Added: trunk/bigdata-jini/src/java/com/bigdata/attr/ServiceInfo.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/attr/ServiceInfo.java (rev 0) +++ trunk/bigdata-jini/src/java/com/bigdata/attr/ServiceInfo.java 2010-07-23 22:21:55 UTC (rev 3283) @@ -0,0 +1,218 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ + +package com.bigdata.attr; + +import net.jini.entry.AbstractEntry; + +import java.io.Serializable; +import java.net.InetAddress; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +public class ServiceInfo extends AbstractEntry { + + private static final long serialVersionUID = 1L; + + public ServiceInfo() { } + + /** + * The unique id of the service associated with the given instance of + * this class. + * + * @serial + */ + public UUID source; + + /** + * <code>String</code> value that should contain a human readable name, + * description, or identifying string that can be used for display by + * clients managing or monitoring the associated service. Note that if + * this field is <code>null</code> or the empty string, then clients + * are encouraged to use the associated service's class name in such + * displays. + * + * @serial + */ + public String serviceName; + + /** + * <code>Map</code> whose key-value pairs consist of an + * <code>InetAddress</code> as key, and a <code>String</code> as + * corresponding value. Each key in the map represents an IP address + * associated with one of the network interface(s) of the node + * on which the associated service is exectuing. Each key's value + * is the name of the network interface to which that IP address + * is assigned. 
+ * + * @serial + */ + public Map<InetAddress,String> inetAddresses; + + /** + * <code>String</code> value that is unique across all <i>nodes</i> + * in the system. With respect to the services executing on a given + * node, each such service should be able to discover, (or + * independently set through configuration for example), the information + * represented by the value of this field. For example, the value in this + * field might be set to the MAC address of an agreed upon network + * interface card (NIC) installed on the node on which the associated + * service is executing, or it might simply be that node's host name + * or IP address. + * + * @serial + */ + public String nodeToken; + + /** + * The unique id of the <i>node service</i> having the same + * <i>node token</i> as the token value referenced in this class; + * where a single node service executes on each node in the system, + * acting as a physical representation of the node on which this + * class' associated service also executes. + * + * @serial + */ + public UUID nodeId; + + /** + * <code>String</code> value representing the <i>logical name</i> that + * has been assigned to the <i>node</i> on which the associated service + * is executing. + * + * @serial + */ + public String nodeName; + + /** + * <code>Integer</code> value that uniquely identifies the + * physical position, within a given <i>rack</i>, in which the + * associated node has been mounted, relative to all other + * devices in the rack. + * <p> + * To understand the value that this field corresponds to, recall that + * the possible positions in which devices can be mounted in a given + * rack are generally identified by labeling the rack with a sequence + * of monotonically increasing integers in which the space in the rack + * from one number in the sequence to the next, represents <i>1U</i> in + * height; that is, 1 <i>rack unit</i> in height. As such, some devices + * will occupy only 1U of the rack, where the <i>U-number range</i> of + * such a device would correspond to the labels <b>n</b> and <b>n+1</b>; + * whereas other, larger devices may occupy 2U or 4U of the rack, and + * such devices would have U-number ranges of <b>n</b> to <b>n+2</b>, + * and <b>n</b> to <b>n+4</b> respectively. With respect to this + * so-called U-number range, the value of this field then, is the + * <i>first</i> number of the associated node's U-number range; + * that is, the vlaue of the U-number label adjacent to the top of + * the mounted node, relative to that device's position in the rack. + * For example, consider a node that is 4U in height and which + * is mounted in a given rack, occupying the positions ranging from + * U-number 75 to U-number 79. For that particular node, the value + * of this field would be 75; the first number of the range that is + * occupied. + * + * @serial + */ + public Integer uNumber; + + /** + * <code>String</code> value that uniquely identifies the <i>rack</i> + * (within a given <i>cage</i>) in the system that contains the + * <i>node</i> on which the associated service is executing. + * + * @serial + */ + public String rack; + + /** + * <code>String</code> value that uniquely identifies the <i>cage</i> + * (within a given <i>zone</i>) that contains the <i>rack</i> in which + * the associated service is executing. 
+ * + * @serial + */ + public String cage; + + /** + * <code>String</code> value that uniquely identifies the <i>zone</i> + * (within a given <i>site</i>) that contains the <i>cage</i> in which + * the associated service is executing. + * + * @serial + */ + public String zone; + + /** + * <code>String</code> value that uniquely identifies the <i>site</i> + * (within a given <i>region</i>) that contains the <i>zone</i> in which + * the associated service is executing. + * + * @serial + */ + public String site; + + /** + * <code>String</code> value that uniquely identifies the <i>region</i> + * (within a given <i>geo</i>) that contains the <i>site</i> in which + * the associated service is executing. + * + * @serial + */ + public String region; + + /** + * <code>String</code> value that uniquely identifies a given <i>geo</i> + * designation, across <i>all</i> geo's, that contains the <i>region</i> + * in which the associated service is executing. + * + * @serial + */ + public String geo; + + /** + * @see <code>net.jini.entry.AbstractEntry#equals</code>. + */ + @Override + public boolean equals(Object obj) { + return super.equals(obj); + } + + /** + * @see <code>net.jini.entry.AbstractEntry#hashCode</code>. + */ + @Override + public int hashCode() { + return super.hashCode(); + } + + /** + * @see <code>net.jini.entry.AbstractEntry#toString</code>. + */ + @Override + public String toString() { + return super.toString(); + } +} Property changes on: trunk/bigdata-jini/src/java/com/bigdata/attr/ServiceInfo.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: trunk/bigdata-jini/src/java/com/bigdata/disco/DiscoveryTool.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/disco/DiscoveryTool.java (rev 0) +++ trunk/bigdata-jini/src/java/com/bigdata/disco/DiscoveryTool.java 2010-07-23 22:21:55 UTC (rev 3283) @@ -0,0 +1,375 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +package com.bigdata.disco; + +import com.bigdata.util.Util; + +import com.sun.jini.config.Config; +import net.jini.config.Configuration; +import net.jini.config.ConfigurationException; +import net.jini.config.ConfigurationProvider; +import net.jini.core.constraint.MethodConstraints; +import net.jini.core.discovery.LookupLocator; +import net.jini.core.entry.Entry; +import net.jini.core.lookup.ServiceItem; +import net.jini.core.lookup.ServiceTemplate; +import net.jini.discovery.ConstrainableLookupLocator; +import net.jini.discovery.DiscoveryManagement; +import net.jini.discovery.DiscoveryGroupManagement; +import net.jini.discovery.DiscoveryLocatorManagement; +import net.jini.lookup.LookupCache; +import net.jini.lookup.ServiceDiscoveryEvent; +import net.jini.lookup.ServiceDiscoveryListener; +import net.jini.lookup.ServiceDiscoveryManager; +import net.jini.lookup.ServiceItemFilter; + +import static java.lang.Boolean.parseBoolean; +import static java.lang.Integer.parseInt; +import static java.lang.Long.parseLong; +import static java.lang.Thread.sleep; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.synchronizedMap; +import static net.jini.discovery.Constants.discoveryPort; +import static net.jini.discovery.DiscoveryGroupManagement.ALL_GROUPS; + +import java.io.IOException; +import java.io.PrintStream; +import java.text.DateFormat; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.WeakHashMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class DiscoveryTool { + + private static final String COMPONENT_NAME = DiscoveryTool.class.getName(); + private static final String CONFIG_ARG = "config"; + private static final String GROUP_ARG_PREFIX = "group"; + private static final String LOCATOR_ARG_PREFIX = "locator"; + private static final String SERVICE_ARG_PREFIX = "service"; + private static final String ALL_GROUPS_ARG = "allGroups"; + private static final String TIMEOUT_ARG = "timeout"; + private static final String OUTPUT_ARG = "output"; + private static final String VERBOSE_ARG = "verbose"; + private static final Pattern hostPortPattern = + Pattern.compile("(.+):(\\d+)"); + + public static void main(String[] args) throws Exception { + new DiscoveryTool(args).run(); + } + + private final Map<Class, Collection<Class>> typeCache = + synchronizedMap(new WeakHashMap<Class, Collection<Class>>()); + private final Configuration config; + private final Collection<String> groups = new ArrayList<String>(); + private final Collection<LookupLocator> locators = + new ArrayList<LookupLocator>(); + private final Collection<Pattern> serviceTypePatterns = + new ArrayList<Pattern>(); + private final boolean allGroups; + private final Long timeout; + private final PrintStream out; + private final boolean verbose; + private final DateFormat dateFormat; + private final Set<String> uninterestingInterfaces; + + private DiscoveryTool(String[] rawArgs) + throws ConfigurationException, IOException + { + Arguments args = new Arguments(rawArgs); + + config = ConfigurationProvider.getInstance + ( new String[]{ args.get(CONFIG_ARG) }, + 
(this.getClass()).getClassLoader() ); + + for (int i = 0; ; i++) { + String argName = GROUP_ARG_PREFIX + i; + if (!args.contains(argName)) { + break; + } + groups.add(args.get(argName)); + } + + MethodConstraints locatorConstraints = + (MethodConstraints)config.getEntry(COMPONENT_NAME, + "lookupLocatorConstraints", + MethodConstraints.class, + null); + for (int i = 0; ; i++) { + String argName = LOCATOR_ARG_PREFIX + i; + if (!args.contains(argName)) { + break; + } + String val = args.get(argName); + Matcher m = hostPortPattern.matcher(val); + locators.add(m.matches() ? + new ConstrainableLookupLocator( + m.group(1), parseInt(m.group(2)), locatorConstraints) : + new ConstrainableLookupLocator( + val, discoveryPort, locatorConstraints)); + } + + for (int i = 0; ; i++) { + String argName = SERVICE_ARG_PREFIX + i; + if (!args.contains(argName)) { + break; + } + serviceTypePatterns.add(Pattern.compile(args.get(argName))); + } + + timeout = args.contains(TIMEOUT_ARG) ? + parseLong(args.get(TIMEOUT_ARG)) : null; + + if (args.contains(OUTPUT_ARG)) { + String val = args.get(OUTPUT_ARG); + out = val.equals("-") ? System.out : new PrintStream(val); + } else { + out = System.out; + } + + allGroups = parseBoolean(args.get(ALL_GROUPS_ARG)); + verbose = parseBoolean(args.get(VERBOSE_ARG)); + + dateFormat = (DateFormat)Config.getNonNullEntry + (config, COMPONENT_NAME, "dateFormat", + DateFormat.class, DateFormat.getInstance()); + + uninterestingInterfaces = + new HashSet<String>(asList((String[])Config.getNonNullEntry + (config, COMPONENT_NAME, + "uninterestingInterfaces", + String[].class, + new String[0]))); + } + + void run() + throws ConfigurationException, IOException, InterruptedException + { + DiscoveryManagement discoveryManager = null; + ServiceDiscoveryManager serviceDiscovery = null; + + try { + println(getDateString(), ": starting discovery"); + + discoveryManager = + Util.getDiscoveryManager(config, COMPONENT_NAME); + + verbose("created lookup discovery manager: ", discoveryManager); + + verbose("groups: ", allGroups ? "<all>" : groups.toString()); + ((DiscoveryGroupManagement) discoveryManager).setGroups( + allGroups ? + ALL_GROUPS : groups.toArray(new String[groups.size()])); + verbose("locators: ", locators); + ((DiscoveryLocatorManagement) discoveryManager).setLocators( + locators.toArray(new LookupLocator[locators.size()])); + + serviceDiscovery = + new ServiceDiscoveryManager(discoveryManager, null, config); + verbose("created service discovery manager: ", serviceDiscovery); + + verbose("service type patterns: ", serviceTypePatterns); + ServiceItemFilter serviceFilter = serviceTypePatterns.isEmpty() ? + null : new ServiceTypeFilter(serviceTypePatterns); + LookupCache cache = serviceDiscovery.createLookupCache( + null, serviceFilter, new Listener()); + verbose("created lookup cache: ", cache); + + verbose( + "timeout: ", (timeout != null) ? (timeout + " ms") : "<none>"); + if (timeout != null) { + sleep(timeout); + } else { + while (true) { + sleep(Long.MAX_VALUE); + } + } + + verbose("shutting down"); + + } finally { + if (serviceDiscovery != null) { + serviceDiscovery.terminate(); + } + if (discoveryManager != null) { + discoveryManager.terminate(); + } + } + } + + void println(Object... args) { + StringBuilder sb = new StringBuilder(); + for (Object arg : args) { + sb.append(arg); + } + out.println(sb); + } + + void verbose(Object... 
args) { + if (verbose) { + println(args); + } + } + + String getDateString() { + return dateFormat.format(new Date()); + } + + Collection<Class> getTypes(Object obj) { + if (obj == null) { + return emptyList(); + } + Class cl = obj.getClass(); + Collection<Class> types = typeCache.get(cl); + if (types == null) { + Set<Class> s = new HashSet<Class>(); + accumulateTypes(cl, s); + types = new ArrayList<Class>(s); + typeCache.put(cl, types); + } + return types; + } + + private static void accumulateTypes(Class cl, Set<Class> types) { + if (cl != null && types.add(cl)) { + accumulateTypes(cl.getSuperclass(), types); + for (Class intf : cl.getInterfaces()) { + accumulateTypes(intf, types); + } + } + } + + private static String join(Iterable<?> i, String delim) { + Iterator<?> iter = i.iterator(); + if (!iter.hasNext()) { + return ""; + } + StringBuilder sb = new StringBuilder(); + sb.append(iter.next()); + while (iter.hasNext()) { + sb.append(delim); + sb.append(iter.next()); + } + return sb.toString(); + } + + private class Listener implements ServiceDiscoveryListener { + + Listener() { + } + + public synchronized void serviceAdded(ServiceDiscoveryEvent event) { + println(getDateString(), ": service added"); + printServiceItem(event.getPostEventServiceItem()); + } + + public synchronized void serviceChanged(ServiceDiscoveryEvent event) { + println(getDateString(), ": service changed"); + printServiceItem(event.getPostEventServiceItem()); + } + + public synchronized void serviceRemoved(ServiceDiscoveryEvent event) { + println(getDateString(), ": service removed"); + printServiceItem(event.getPreEventServiceItem()); + } + + private void printServiceItem(ServiceItem item) { + Collection<String> typeNames = new ArrayList<String>(); + for (Class type : getTypes(item.service)) { + if (type.isInterface() && + !uninterestingInterfaces.contains(type.getName())) + { + typeNames.add(type.getName()); + } + } + + println(" Service ID: ", item.serviceID); + println(" Types: ", join(typeNames, ", ")); + println(" Attributes:"); + for (Entry e : item.attributeSets) { + println(" ", e); + } + println(); + } + } + + private class ServiceTypeFilter implements ServiceItemFilter { + + private final Collection<Pattern> serviceTypePatterns; + + ServiceTypeFilter(Collection<Pattern> serviceTypePatterns) { + this.serviceTypePatterns = serviceTypePatterns; + } + + public boolean check(ServiceItem item) { + for (Class type : getTypes(item.service)) { + for (Pattern pattern : serviceTypePatterns) { + if (pattern.matcher(type.getName()).find()) { + return true; + } + } + } + return false; + } + } + + private static class Arguments { + + private static final Pattern pattern = Pattern.compile("(.+)=(.+)"); + private final Map<String, String> map = new HashMap<String, String>(); + + Arguments(String[] rawArgs) { + for (String arg : rawArgs) { + Matcher m = pattern.matcher(arg); + if (!m.matches()) { + throw new IllegalArgumentException( + "invalid argument: " + arg); + } + map.put(m.group(1), m.group(2)); + } + } + + boolean contains(String name) { + return map.containsKey(name); + } + + String get(String name) { + String val = map.get(name); + if (val != null) { + return val; + } else { + throw new IllegalArgumentException("no value for " + name); + } + } + } +} Property changes on: trunk/bigdata-jini/src/java/com/bigdata/disco/DiscoveryTool.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: 
trunk/bigdata-jini/src/java/com/bigdata/disco/config/disco.config =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/disco/config/disco.config (rev 0) +++ trunk/bigdata-jini/src/java/com/bigdata/disco/config/disco.config 2010-07-23 22:21:55 UTC (rev 3283) @@ -0,0 +1,65 @@ +/* Configuration file for the discovery tool */ + +// Note that this file has no tunable parameters +// and should not be modified. + +import java.text.DateFormat; + +import net.jini.constraint.BasicMethodConstraints; +import net.jini.core.constraint.ConnectionRelativeTime; +import net.jini.core.constraint.InvocationConstraints; +import net.jini.core.discovery.LookupLocator; +import net.jini.discovery.LookupDiscoveryManager; +import net.jini.jeri.BasicILFactory; +import net.jini.jeri.BasicJeriExporter; +import net.jini.jeri.tcp.TcpServerEndpoint; + +import com.bigdata.util.config.NicUtil; + +com.bigdata.disco.DiscoveryTool { + + private static exportIpAddr = + NicUtil.getIpAddress("${exportNic}", 0, "${exportHost}"); + + lookupLocatorConstraints = null; + + dateFormat = + DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.FULL); + + uninterestingInterfaces = + new String[] { "java.io.Serializable", + "java.rmi.Remote", + "net.jini.admin.Administrable", + "net.jini.core.constraint.RemoteMethodControl", + "net.jini.id.ReferentUuid", + "com.bigdata.service.Service", + "com.bigdata.service.EventReceivingService" + }; + + + static discoveryManager = + new LookupDiscoveryManager + (new String[]{}, new LookupLocator[]{}, null, this); +} + +net.jini.discovery.LookupDiscovery { + + multicastRequestHost = + com.bigdata.disco.DiscoveryTool.exportIpAddr; + + multicastInterfaces = + NicUtil.getNetworkInterfaceArray("${networkInterface}"); +} + +net.jini.lookup.ServiceDiscoveryManager { + + private static serverILFactory = + new BasicILFactory( + new BasicMethodConstraints( + new InvocationConstraints( + new ConnectionRelativeTime(10000L), null)), + null); + eventListenerExporter = + new BasicJeriExporter(TcpServerEndpoint.getInstance(com.bigdata.disco.DiscoveryTool.exportIpAddr, 0), + serverILFactory, false, false); +} Property changes on: trunk/bigdata-jini/src/java/com/bigdata/disco/config/disco.config ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: trunk/bigdata-jini/src/java/com/bigdata/disco/config/logging.properties =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/disco/config/logging.properties (rev 0) +++ trunk/bigdata-jini/src/java/com/bigdata/disco/config/logging.properties 2010-07-23 22:21:55 UTC (rev 3283) @@ -0,0 +1,25 @@ +###################################################################### +# Log4j configuration file for the discovery tool +###################################################################### + +# FATAL +# ERROR +# WARN +# INFO +# DEBUG +# TRACE + +# All messages are directed to stderr. 
+ +# log4j setup +log4j.rootLogger=WARN, Console +log4j.appender.Console=org.apache.log4j.ConsoleAppender +log4j.appender.Console.target=System.err +log4j.appender.Console.layout=org.apache.log4j.PatternLayout +log4j.appender.Console.layout.ConversionPattern=%d [%t] %-5p %c - %m%n + +# java.util.logging setup +.level=WARNING +handlers=com.bigdata.util.config.Log4jLoggingHandler +com.sun.jini.logging.interval=60000 + Property changes on: trunk/bigdata-jini/src/java/com/bigdata/disco/config/logging.properties ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: trunk/bigdata-jini/src/java/com/bigdata/util/Util.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/util/Util.java (rev 0) +++ trunk/bigdata-jini/src/java/com/bigdata/util/Util.java 2010-07-23 22:21:55 UTC (rev 3283) @@ -0,0 +1,426 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ + +package com.bigdata.util; + +import com.bigdata.util.config.ConfigDeployUtil; +import com.bigdata.util.config.LogUtil; + +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +import com.sun.jini.config.Config; +import com.sun.jini.thread.InterruptedStatusThread; +import net.jini.config.Configuration; +import net.jini.config.ConfigurationException; +import net.jini.config.NoSuchEntryException; +import net.jini.core.lookup.ServiceID; +import net.jini.discovery.DiscoveryManagement; +import net.jini.discovery.DiscoveryGroupManagement; +import net.jini.discovery.DiscoveryLocatorManagement; +import net.jini.discovery.LookupDiscoveryManager; +import net.jini.export.Exporter; +import net.jini.lookup.JoinManager; +import net.jini.lookup.ServiceDiscoveryEvent; +import net.jini.lookup.ServiceDiscoveryManager; + +import java.io.IOException; +import java.util.Collection; +import java.util.UUID; + +/** + * Miscellaneous, convenient utility methods. + */ +public class Util { + + public static <T extends Comparable<T>> T max(final T... elements){ + T max = elements[0]; + for (final T element : elements) { + if(0 < element.compareTo(max)) { + max = element; + } + } + return max; + } + + public static <T extends Comparable<T>> T min(final T... elements){ + T min = elements[0]; + for (final T element : elements) { + if(0 > element.compareTo(min)) { + min = element; + } + } + return min; + } + + /* Convenience method that can be called when a service exits, or + * when failure occurs during the service's initialization process. + * This method un-does any work that may have already been completed; + * for example, un-exports the service if it has already been + * exported, closes any open sockets or file descriptors, terminates + * threads that may have been started, etc. 
+ * <p> + * Note that multiple versions of this method are provided. One version + * is intended to be used by entities that act only as a service (that + * is, entities that export a proxy and use a <code>JoinManager</code>). + * One version is intended to be used by entities that act only as a client + * (that is, entites that use a <code>ServiceDiscoveryManager</code>). + * And the final version can be used by entities that act as both a + * service and as a client. + */ + public static void cleanupOnExit + (Object innerProxy, + Exporter serverExporter, + JoinManager joinManager, + DiscoveryManagement discoveryManager) + { + cleanupOnExit(innerProxy, serverExporter, joinManager, + null, discoveryManager); + + } + + public static void cleanupOnExit + (ServiceDiscoveryManager serviceDiscoveryManager, + DiscoveryManagement discoveryManager) + { + cleanupOnExit(null, null, null, + serviceDiscoveryManager, discoveryManager); + } + + + public static void cleanupOnExit + (Object innerProxy, + Exporter serverExporter, + JoinManager joinManager, + ServiceDiscoveryManager serviceDiscoveryManager, + DiscoveryManagement discoveryManager) + { + if(innerProxy != null) { + try { + if(serverExporter != null) serverExporter.unexport(true); + } catch(Throwable t) { } + } + + if(joinManager != null) { + try { + joinManager.terminate(); + } catch(Throwable t) { } + } + + if(serviceDiscoveryManager != null) { + try { + serviceDiscoveryManager.terminate(); + } catch(Throwable t) { } + } + + if(discoveryManager != null) { + try { + discoveryManager.terminate(); + } catch(Throwable t) { } + } + } + + + /** + * Unexports the remote object that was exported by the given + * <code>Exporter</code> parameter; which removes the object + * from the RMI runtime so that the object can no longer accept + * incoming remote calls.er accept incoming RMI calls. + * <P> + * This method first makes an attempt to unexport the object + * 'gracefully'. That is, for a finite period of time, an attempt + * is made to allow all calls to the object that are in progress, + * or pending, to complete before the object is actually unexported. + * If, after that finite period of time, the object has not been + * successfully unexported, the object is then 'forcibly' unexported; + * that is, the object is unexported even if there are calls to + * the object that are in progress or still pending. + * <P> + * Upon successfully unexporting the given <code>Exporter</code>, + * <code>true</code> is returned. If the given <code>Exporter</code> + * cannot be unexported, or if the value input for that parameter + * is <code>null</code> or has not exported any interfaces, then + * <code>false</code> is returned. + */ + public static boolean unexportRemoteObject(Exporter exporter) { + if (exporter == null) return false; + + // delay no more than 1 minute + final long endTime = System.currentTimeMillis() + (1L*60L*1000L); + boolean unexported = false; + try { + // Unexport only if there are no pending or in-progress calls + while (!unexported && System.currentTimeMillis() < endTime) { + unexported = exporter.unexport(false);//do not force + if (!unexported) Thread.yield(); + }//end loop + if (!unexported) unexported = exporter.unexport(true);//force + } catch ( IllegalStateException e ) { + // Thrown if no object has been exported with the + // Exporter instance + return false; + } + return unexported; + } + + + /** + * Convenience method that can be called in an entity's constructor + * when failure occurs during the initialization process. 
This + * method simply rethrows the given <code>Throwable</code> so the + * constructor doesn't have to. + */ + public static void handleInitThrowable(Throwable t, Logger logger) + throws IOException, + ConfigurationException + { + if( logger != null ) { + logger.log(Level.FATAL, "initialization failure ... ", t); + } else { + System.err.println("FATAL: initialization failure ... "+t); + }//endif + if (t instanceof IOException) { + throw (IOException)t; + } else if (t instanceof ConfigurationException) { + throw (ConfigurationException)t; + } else if (t instanceof RuntimeException) { + throw (RuntimeException)t; + } else if (t instanceof Error) { + throw (Error)t; + }//endif + } + + /** + * Convenience method that returns a <code>String</code> containing + * a common-separated list the elements (group names) of the given + * array. + */ + public static String writeGroupArrayToString(String[] groups) { + if(groups == null) { + return new String("[ALL_GROUPS]"); + }//endif + if(groups.length <= 0) { + return new String("[]"); + }//endif + StringBuffer strBuf = null; + if(groups[0].compareTo("") == 0) { + strBuf = new StringBuffer("[The PUBLIC Group"); + } else { + strBuf = new StringBuffer("["+groups[0]); + }//endif + for(int i=1;i<groups.length;i++) { + if(groups[i].compareTo("") == 0) { + strBuf.append(", The PUBLIC Group"); + } else { + strBuf.append(", ").append(groups[i]); + }//endif + }//end loop + strBuf.append("]"); + return strBuf.toString(); + } + + /** + * Convenience method that returns a <code>String</code> containing + * a common-separated list the elements (locators) of the given + * array. + */ + public static String writeArrayElementsToString(Object[] arr) { + if(arr == null) return new String("[]"); + if(arr.length <= 0) { + return new String("[]"); + }//endif + StringBuffer strBuf = new StringBuffer("["+arr[0]); + for(int i=1;i<arr.length;i++){ + strBuf.append(", ").append(arr[i]); + }//end loop + strBuf.append("]"); + return strBuf.toString(); + } + + /** + * Convenience method to simplify the throwing of exceptions with embedded + * causes (avoids having to cast the return value of Throwable.initCause + * back to the exception's type). Use as follows: + * <pre> + * throw Util.initCause(new SomeException("foo"), cause); + * </pre> + */ + public static <T extends Throwable> T initCause(T t, Throwable cause) { + t.initCause(cause); + return t; + } + + /** + * Verifies that all non-<code>null</code> elements of the given + * <code>Collection</code> are assignable to the specified type, + * throwing a <code>ClassCastException</code> if any are not. + */ + public static void checkElementTypes(Collection<?> c, Class<?> type) { + for (Object elt : c) { + if (!type.isInstance(elt)) { + throw new ClassCastException( + elt + " not assignable to " + type); + } + } + } + + /** + * Returns a UUID with the same bit value as the given + * <code>ServiceID</code>. + */ + public static UUID toUUID(ServiceID serviceId) { + return new UUID( serviceId.getMostSignificantBits(), + serviceId.getLeastSignificantBits() ); + } + + /** + * Returns a string representation of the given + * <code>ServiceDiscoveryEvent</code> (since + * <code>ServiceDiscoveryEvent</code> doesn't define + * its own <code>toString</code> method). 
+ */ + public static String eventToString(ServiceDiscoveryEvent event) { + return "ServiceDiscoveryEvent[source=" + event.getSource() + + ",preEventItem=" + event.getPreEventServiceItem() + + ",postEventItem=" + event.getPostEventServiceItem() + "]"; + } + + /** + * Convenience method that encapsulates common functions that services + * or clients may wish to perform to be able to discover lookup services + * in the system. + * <p> + * This method retrieves and returns a lookup discovery manager from + * the given <code>Configuration</code>. If no lookup discovery manager + * has been configured, this method will return an instance of the + * <code>LookupDiscoveryManager</code> helper utility class, + * initialized to discover NO_GROUPS and no locators. When such a + * discovery manager is returned, the calling entity can call the + * <code>setGroups</code> and/or </code>setLocators</code> method + * to initiate the lookup discovery process. + * <p> + * Note that this method expects that the discovery manager + * that has been configured is an instance of both + * <code>DiscoveryGroupManagement</code> and + * <code>DiscoveryLocatorManagement</code>. + * + * @param config The calling service's <code>Configuration</code> + * from which this method will retrieve the items + * needed to perform the desired initialization. + * + * @param componentName <code>String</code> whose value is the name of + * the <i>component</i> used to index the calling + * service's configuration <i>entries</i>. + * + * @param entryName <code>String</code> whose value is the name of + * the configuration entry that references the + * the desired lookup discovery manager instance + * specified in the configuration. + * + * @return An instance of <code>DiscoveryManagement</code> that supports + * both group and locator discovery; where the instance returned + * is either retrieved from the given <code>Configuration</code>, + * or is a default instance of <code>LookupDiscoveryManager</code>. + * + * @throws <code>ConfigurationException</code> when there is a problem + * retrieving the desired entry from the configuration. + * + * @throws IOException when there is a problem with multicast discovery. + */ + public static DiscoveryManagement getDiscoveryManager + (Configuration config, + String componentName, + String entryName ) + throws ConfigurationException, + IOException + { + // The discovery manager must be an instance of both + // DiscoveryGroupManagement and DiscoveryLocatorManagement, so that + // the groupsToJoin and locatorsToJoin can both be retrieved from + //the discovery manager and displayed. + DiscoveryManagement dMgr; + try { + dMgr = (DiscoveryManagement)Config.getNonNullEntry + (config, + componentName, + entryName, + DiscoveryManagement.class); + if( !(dMgr instanceof DiscoveryGroupManagement) ) { + throw new ConfigurationException + (entryName + " entry must " + +"implement DiscoveryGroupManagment"); + } + if( !(dMgr instanceof DiscoveryLocatorManagement) ) { + throw new ConfigurationException + (entryName + " entry must " + +"implement DiscoveryLocatorManagement"); + } + } catch (NoSuchEntryException e) { + return ( new LookupDiscoveryManager + (ConfigDeployUtil.getGroupsToDiscover(), + ConfigDeployUtil.getLocatorsToDiscover(), + null, config) ); + } + return dMgr; + } + + /** + * Retrieves and returns a lookup discovery manager from the given + * <code>Configuration</code>, using a default entry name of + * <i>discoveryManager</i>. 
+ */ + public static DiscoveryManagement getDiscoveryManager + (Configuration config, + String componentName) + throws ConfigurationException, + IOException + { + return getDiscoveryManager(config, componentName, "discoveryManager"); + } + + public static class WaitOnInterruptThread extends InterruptedStatusThread { + private Logger logger; + public WaitOnInterruptThread(final Logger logger) { + super("WaitOnInterruptThread"); + setDaemon(true); + this.logger = (logger == null ? + LogUtil.getLog4jLogger((this.getClass()).getName()) : + logger); + } + public void run() { + while (!hasBeenInterrupted()) { + try { + Thread.sleep(Long.MAX_VALUE); + } catch (InterruptedException e) { + if( logger.isDebugEnabled() ) { + logger.log(Level.DEBUG, + "Util.WaitOnInterruptThread: " + +"interrupt received"); + } + } + } + } + } +} Property changes on: trunk/bigdata-jini/src/java/com/bigdata/util/Util.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: trunk/bigdata-jini/src/java/com/bigdata/util/config/ConfigDeployUtil.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/util/config/ConfigDeployUtil.java (rev 0) +++ trunk/bigdata-jini/src/java/com/bigdata/util/config/ConfigDeployUtil.java 2010-07-23 22:21:55 UTC (rev 3283) @@ -0,0 +1,595 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ + +package com.bigdata.util.config; + +import com.bigdata.attr.ServiceInfo; + +import com.sun.jini.config.Config; +import net.jini.config.Configuration; +import net.jini.config.ConfigurationException; +import net.jini.core.discovery.LookupLocator; +import net.jini.discovery.DiscoveryManagement; +import net.jini.discovery.DiscoveryGroupManagement; +import net.jini.discovery.DiscoveryLocatorManagement; +import net.jini.discovery.LookupDiscoveryManager; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.net.SocketException; +import java.util.Arrays; +import java.util.List; +import java.util.Properties; +import java.util.UUID; + +/** + * Utility class containing a number of convenient methods that encapsulate + * common functions related to configuration and deployment of the entity. + * The methods of this class are <code>static</code> so that they can be + * invoked from within a Jini configuration. 
+ */ +public class ConfigDeployUtil { + + private static Properties deploymentProps = null; + + private static final String DEFAULT = ".default"; + private static final String STRINGVALS = ".stringvals"; + private static final String MAX = ".max"; + private static final String MIN = ".min"; + private static final String DESCRIPTION = ".description"; + private static final String TYPE = ".type"; + + public static String getString(String parameter) + throws ConfigurationException + { + String value = get(parameter); + validateString(parameter, value); + return value; + } + + public static String[] getStringArray(String parameter) + throws ConfigurationException + { + String[] value; + value = validateStringArray(parameter, get(parameter)); + return value; + } + + public static int getInt(String parameter) throws ConfigurationException { + int value; + value = validateInt(parameter, get(parameter)); + return value; + } + + public static long getLong(String parameter) throws ConfigurationException + { + long value; + value = validateLong(parameter, get(parameter)); + return value; + } + + public static boolean getBoolean(String parameter) + throws ConfigurationException { + boolean boolValue = false; + String value; + try { + value = get(parameter); + } catch (Exception ex) { + throw new ConfigurationException("parameter value ["+parameter+"] " + +"neither 'true' nor 'false'"); + } + if( value != null && value.equalsIgnoreCase("true") + || value.equalsIgnoreCase("false") ) + { + boolValue = Boolean.parseBoolean(value); + } else { + throw new ConfigurationException("parameter value ["+parameter+"] " + +"neither 'true' nor 'false'"); + } + return boolValue; + } + + public static String getDescription(String parameter) + throws ConfigurationException + { + String value; + if(deploymentProps == null) { + deploymentProps = new Properties(); + loadDeployProps(deploymentProps); + } + value = deploymentProps.getProperty(parameter + DESCRIPTION); + return value; + } + + public static String getType(String parameter) + throws ConfigurationException + { + String value; + if (deploymentProps == null) { + deploymentProps = new Properties(); + loadDeployProps(deploymentProps); + } + value = deploymentProps.getProperty(parameter + TYPE); + return value; + } + + public static String getDefault(String parameter) + throws ConfigurationException + { + String value; + if (deploymentProps == null) { + deploymentProps = new Properties(); + loadDeployProps(deploymentProps); + } + value = deploymentProps.getProperty(parameter + DEFAULT); + if (value == null) { + throw new ConfigurationException + ("deployment parameter not found ["+parameter+"]"); + } + return value; + } + + /** + * Returns a <code>String</code> array whose elments represent the + * lookup service groups to discover. If the system property named + * "federation.name" is set then that value be used; otherwise, + * the deployment properties files will be consulted. + * + * @throws ConfigurationException if the groups cannot be determined. + */ + public static String[] getGroupsToDiscover() throws ConfigurationException + { + String fedNameStr = System.getProperty("federation.name"); + if(fedNameStr == null) { + fedNameStr = getString("federation.name"); + } + return fedNameStr.split(","); + } + + /** + * Returns an array of <code>LookupLocator</code> instances that can + * each be used to discover a specific lookup service. + * + * @throws ConfigurationException if the locators cannot be determined. 
+ */ + public static LookupLocator[] getLocatorsToDiscover() + throws ConfigurationException + { + return new LookupLocator[]{}; + } + + /** + * Returns an instance of the <code>ServiceInfo</code> attribute class, + * initialized to the values specified in the deployment properties + * files. + */ + public static ServiceInfo initServiceInfo(UUID source, String serviceName) + throws SocketException, ConfigurationException + { + ServiceInfo serviceInfo = new ServiceInfo(); + serviceInfo.source = source; + serviceInfo.serviceName = serviceName; + + serviceInfo.inetAddresses = NicUtil.getInetAddressMap(); + + // Get the common token that all services running on the same + // node agree on. Use the MAC address or IP address as default token + String nodeNicName = getString("node.serviceNetwork"); + String nodeIp = NicUtil.getIpAddress(nodeNicName, false); + serviceInfo.nodeToken = NicUtil.getMacAddress(nodeNicName); + if(serviceInfo.nodeToken == null) serviceInfo.nodeToken = nodeIp; + + serviceInfo.nodeId = null;//not set until a node service exists + serviceInfo.nodeName = getString("node.name"); + + serviceInfo.uNumber = getInt("node.uNumber"); + + serviceInfo.rack = getString("node.rack"); + serviceInfo.cage = getString("node.cage"); + serviceInfo.zone = getString("node.zone"); + serviceInfo.site = getString("node.site"); + serviceInfo.region = getString("node.region"); + serviceInfo.geo = getString("node.geo"); + + return serviceInfo; + } + + + private static String get(String parameter) throws ConfigurationException { + String value; + if (deploymentProps == null) { + deploymentProps = new Properties(); + loadDeployProps(deploymentProps); + } + value = deploymentProps.getProperty(parameter); + if (value == null) value = getDefault(parameter); + return value; + } + + private static void validateString(String parameter, String value) + throws ConfigurationException + { + String validValuesStr = + (String) deploymentProps.get(parameter + STRINGVALS); + + if (validValuesStr != null) { + String[] validValues = validValuesStr.split(","); + if (!Arrays.asList(validValues).contains(value)) { + throw new ConfigurationException + ("invalid string parameter ["+parameter+"] in " + +"list ["+validValuesStr+"]"); + } + } + return; + } + + private static String[] validateStringArray(String parameter, String value) + throws ConfigurationException + { + String validValuesStr = + (String)(deploymentProps.get(parameter + STRINGVALS)); + String[] values = value.split(","); + + if (validValuesStr != null) { + String[] validValues = validValuesStr.split(","); + List validValuesList = Arrays.asList(validValues); + for (int i=0; i<values.length; i++) { + if (!validValuesList.contains(values[i])) { + throw new ConfigurationException + ("invalid string parameter ["+parameter+"] in " + +"list "+validValuesList); + } + } + } + return values; + } + + private static int validateInt(String parameter, String strvalue) + throws ConfigurationException + { + String maxString = (String)(deploymentProps.get(parameter + MAX)); + String minString = (String)(deploymentProps.get(parameter + MIN)); + + int value = str2int(strvalue); + + if (maxString != null) { + int max = Integer.parseInt(maxString); + if (value > max) { + throw new ConfigurationException("parameter ["+parameter+"] " + +"exceeds maximum ["+max+"]"); + } + } + return value; + } + + private static long validateLong(String parameter, String strvalue) + throws ConfigurationException + { + String maxString = (String)(deploymentProps.get(parameter + MAX)); + String 
minString = (String)(deploymentProps.get(parameter + MIN)); + + long value = str2long(strvalue); + + if (maxString != null) { + long max = Long.parseLong(maxString); + if (value > max) { + throw new ConfigurationException("parameter ["+parameter+"] " + +"exceeds maximum ["+max+"]"); + } + } + if (minString != null) { + long min = Long.parseLong(minString); + if (value < min) { + throw new ConfigurationException("parameter ["+parameter+"] " + +"is less than manimum " + +"["+min+"]"); + } + } + return value; + } + + + private static String getPropertiesPath() { + String rootPath = "/opt/bigdata";//real installation + String appHome = System.getProperty("appHome");//pstart + String appDotHome = System.getProperty("app.home");//build.xml + + if(appHome != null) { + rootPath = appHome; + } else if(appDotHome != null) { + rootPath = appDotHome + File.separator + + "dist" + File.separator + "bigdata"; + } + String relPath = "var" + File.separator + "config" + + File.separator + "deploy"; + String retPath = rootPath + File.separator + relPath; + //eclipse + if( !(new File(retPath)).exists() ) { + String tmpPath = "bigdata-jini" + File.separator + "src" + + File.separator + "java" + + File.separator + "com" + + File.separator + "bigdata" + + File.separator + "util" + + File.separator + "config"; + retPath = (new File(tmpPath)).getAbsolutePath(); + } + return retPath; + } + + private static void loadDeployProps(Properties deployProps) { + loadDefaultProps(deployProps); + loadOverrideProps(deployProps); + } + + private static void loadDefaultProps(Properties deployProps) { + FileInputStream fis = null; + try { + String flnm = getPropertiesPath() + + File.separator + "default-deploy.properties"; + fis = new FileInputStream(flnm); + deployProps.load(fis); + } catch (Exception ex) { + ex.printStackTrace(); + } finally { + if (fis != null) { + try { + fis.close(); + } catch (IOException ioex) { /* swallow */ } + } + } + } + + private static void loadOverrideProps(Properties deployProps) { + FileInputStream fis = null; + try { + String flnm = getPropertiesPath() + + File.separator + "deploy.properties"; + fis = new FileInputStream(new File(flnm)); + deployProps.load(fis); + } catch (Exception ex) { + // using all defaults + } finally { + if (fis != null) { + try { + fis.close(); + } catch (IOException ioex) { /* swallow */ } + } + } + } + + + private static int str2int(String argx) { + long l; + + if( argx.trim().equals(Integer.MAX_VALUE) ) return Integer.MAX_VALUE; + if( argx.trim().equals(Integer.MIN_VALUE) ) return Integer.MIN_VALUE; + + l = str2long(argx); + if (l < Integer.MAX_VALUE && l > Integer.MIN_VALUE) { + return (int) l; + } else { + throw new NumberFormatException("Invalid number:"+argx + +" --number out of range"); + } + } + + private static long str2long(String argx) { + + int minDigitNumBetwnComma = 3; + + String arg = argx.trim(); + arg = arg.replaceAll("\"", ""); // strip all... [truncated message content] |
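The DiscoveryTool added above is driven entirely by name=value arguments parsed by its Arguments helper: config= is mandatory, group<N>=, locator<N>= and service<N>= may be repeated, timeout= and output= are optional, and allGroups= and verbose= must be supplied explicitly. Below is a hedged launch sketch; the wrapper class, config path and group name are assumptions, while the argument names come from the constants declared in DiscoveryTool. A real run would also need the Jini jars on the classpath, and presumably the system properties referenced by disco.config (such as exportNic, exportHost and networkInterface) set via -D.

    // Hypothetical wrapper: starts the new DiscoveryTool with the name=value
    // argument style its Arguments parser expects. Paths and group names are
    // examples only.
    public class DiscoToolDemo {
        public static void main(String[] ignored) throws Exception {
            com.bigdata.disco.DiscoveryTool.main(new String[] {
                "config=bigdata-jini/src/java/com/bigdata/disco/config/disco.config",
                "group0=bigdata.test.group",  // repeat as group1=..., group2=... for more groups
                "allGroups=false",            // required: true discovers ALL_GROUPS instead
                "verbose=true",               // required: echoes discovery manager setup
                "timeout=30000"               // optional: exit after 30 s instead of running forever
            });
        }
    }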
From: <btm...@us...> - 2010-07-26 16:12:52
Revision: 3290 http://bigdata.svn.sourceforge.net/bigdata/?rev=3290&view=rev Author: btmurphy Date: 2010-07-26 16:12:45 +0000 (Mon, 26 Jul 2010) Log Message: ----------- [trunk]: trac #115 - Provide a means to setup and tear down a federation running on a single server to support operator directed benchmarking and CI performance tests. [Additional changes to method NicUtil.getIpAddress(systemPropertyName, defaultNic, loopbackOk) and related config files to make it easier to use that method in both deployment and test scrnarios Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config trunk/src/resources/config/jini/reggie.config Modified: trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java 2010-07-26 15:13:46 UTC (rev 3289) +++ trunk/bigdata/src/java/com/bigdata/util/config/NicUtil.java 2010-07-26 16:12:45 UTC (rev 3290) @@ -507,54 +507,42 @@ /** * Special-purpose convenience method that returns a * <code>String</code> value representing the ip address of - * the current node; where the value that is returned is - * determined according to following criteria: + * the current node. * <p> - * <ul> - * <li> If a non-<code>null</code> value is input for the - * <code>systemPropertyName</code> parameter, then - * this is viewed as a declaration by the caller that - * that the system property with that given value - * should take precedence over all other means of - * determining the desired ip address. As such, this - * method determines if a system property having the - * given has indeed been set and, if it has, returns - * the ip address of the nic having that name; or - * <code>null</code> if there is no nic with the - * desired name installed on the node. - * <li> If a non-<code>null</code> value is input for the - * <code>systemPropertyName</code> parameter, but - * no system property with that name has been set, - * and <code>true</code> has been passed in for - * the <code>fallbackOk</code> parameter, then this - * method will return the IPV4 based address of the - * first reachable nic that is found on the node. - * Upon failing to find such an address, if the - * <code>loopbackOk</code> parameter is also - * <code>true</code>, then this method will return - * the <i>loop back</i> address of the node; otherwise - * <code>null</code> is returned. - * <li> If <code>null</code> is input for the - * <code>systemPropertyName</code> parameter, but - * a non-<code>null</code> value is input for the - * <code>defaultNic</code> parameter, then this - * method returns the ip address of the nic having - * that name; or <code>null</code> if there is no - * nic with the desired default name installed on the - * node. 
- * <li> If <code>null</code> is input for both the - * <code>systemPropertyName</code> parameter and the - * <code>defaultNic</code> parameter, and if the - * <code>fallbackOk</code> parameter is <code>true</code>, - * then this method will return the IPV4 based address - * of the first reachable nic that is found on the node. - * Upon failing to find such an address, if the - * <code>loopbackOk</code> parameter is also - * <code>true</code>, then this method will return - * the <i>loop back</i> address of the node; otherwise - * <code>null</code> is returned. - * </ul> + * If a non-<code>null</code> value is input for the + * <code>systemPropertyName</code> parameter, then this + * method first determines if a system property with + * name equivalent to the given value has been set and, + * if it has, returns the ip address of the nic whose name + * is equivalent to that system property value; or + * <code>null</code> if there is no nic with the desired + * name installed on the node. * <p> + * If there is no system property whose name is the value + * of the <code>systemPropertyName</code> parameter, and + * if the value "default" is input for the + * <code>defaultNic</code> parameter, then this method + * will return the IPV4 based address of the first reachable + * nic that can be found on the node; otherwise, if a + * non-<code>null</code> value not equal to "default" is + * input for the the <code>defaultNic</code> parameter, + * then this method returns the ip address of the nic + * corresponding to that given name; or <code>null</code> + * if there is no such nic name installed on the node. + * <p> + * If, on the other hand, <code>null</code> is input for + * the <code>systemPropertyName</code> parameter, then + * this method will attempt to find the desired ip address + * using only the value of the <code>defaultNic</code>, + * and applying the same search criteria as described + * above. + * <p> + * Note that in all cases, if <code>true</code> is input + * for the <code>loopOk</code> parameter, then upon failing + * to find a valid ip address using the specified search + * mechanism, this method will return the <i>loop back</i> + * address; otherwise, <code>null</code> is returned. + * <p> * This method can be called from within a configuration * as well as from within program control. * @@ -568,33 +556,24 @@ * the name of the network interface * whose ip address should be returned * if <code>null</code> is input for the + * <code>systemPropertyName</code> parameter, + * or if there is no system property with + * name equivalent the value of the * <code>systemPropertyName</code> parameter. * - * @param fallbackOk if <code>true</code>, then if either - * no system property is set having the - * name referenced by the - * <code>systemPropertyName</code> parameter, - * or if <code>null</code> is input for both - * the <code>systemPropertyName</code> - * parameter and the <code>defaultNic</code> - * parameter, return the IPV4 based address - * of the first reachable network interface - * that can be found on the node. + * @param loopbackOk if <code>true</code>, then return the + * <i>loop back</i> address upon failure + * to find a valid ip address using the + * search criteria specified through the + * <code>systemPropertyName</code> and + * <code>defaultNic</code> parameters. 
* - * @param loopbackOk if <code>true</code>, and if <code>true</code> - * is also input for the <code>fallbackOk</code> - * parameter, then if this method attempts, - * but fails, to find a valid IPV4 fallback - * address, then the node's <i>loop back</i> - * address is returned. - * * @return a <code>String</code> representing an ip address associated * with the current node; where the value that is returned is * determined according to the criteria described above. */ public static String getIpAddress(String systemPropertyName, String defaultNic, - boolean fallbackOk, boolean loopbackOk) throws SocketException, IOException { @@ -613,32 +592,30 @@ } if(propSet) { return getIpAddress(nicName, 0, loopbackOk); - } else {//desired system property not set, try fallback - if(fallbackOk) { - return getDefaultIpv4Address(loopbackOk); + } else {//system property not set, try default and/or fallback + if(defaultNic != null) { + if( defaultNic.equals("default") ) { + return getDefaultIpv4Address(loopbackOk); + } else { + return getIpAddress(defaultNic, 0, loopbackOk); + } } else { return null; } } - } else {//no system property name provided, try default nic + } else {//no system property name provided, try default if(defaultNic != null) { - return getIpAddress(defaultNic, 0, loopbackOk); - } else {//no default nic provided, try fallback - if(fallbackOk) { + if( defaultNic.equals("default") ) { return getDefaultIpv4Address(loopbackOk); } else { - return null; + return getIpAddress(defaultNic, 0, loopbackOk); } + } else { + return getIpAddress(null, loopbackOk); } } } - public static String getIpAddress() - throws SocketException, IOException - { - return getIpAddress(null, null, true, true); - } - /** * Examines each address associated with each network interface * card (nic) installed on the current node, and returns the Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java 2010-07-26 15:13:46 UTC (rev 3289) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/LookupStarter.java 2010-07-26 16:12:45 UTC (rev 3290) @@ -74,8 +74,7 @@ private static String defaultGroup = null; static { try { - thisHost = NicUtil.getIpAddress - ("default.nic", null, true, true); + thisHost = NicUtil.getIpAddress("default.nic", "default", true); defaultGroup = System.getProperty("bigdata.fedname", "bigdata.test.group-"+thisHost); Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config 2010-07-26 15:13:46 UTC (rev 3289) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/config/lookup.config 2010-07-26 16:12:45 UTC (rev 3290) @@ -10,7 +10,7 @@ com.sun.jini.reggie { private static exportPort = 0;//randomly chosen port private static exportIpAddr = - NicUtil.getIpAddress("default.nic", null, true, true); + NicUtil.getIpAddress("default.nic", "default", true); private static codebasePort = 23333;//value used if not set by system prop // Public configuration entries Modified: trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config =================================================================== --- trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config 2010-07-26 15:13:46 UTC (rev 3289) +++ 
trunk/bigdata-jini/src/resources/config/bigdataStandaloneTesting.config 2010-07-26 16:12:45 UTC (rev 3290) @@ -77,7 +77,7 @@ static serviceDir = ConfigMath.getAbsoluteFile(new File(fedname)); private static localIpAddr = - NicUtil.getIpAddress("default.nic", null, true, true); + NicUtil.getIpAddress("default.nic", "default", true); /* * Example cluster configuration. Modified: trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config 2010-07-26 15:13:46 UTC (rev 3289) +++ trunk/bigdata-jini/src/test/com/bigdata/jini/start/testReggie.config 2010-07-26 16:12:45 UTC (rev 3290) @@ -15,7 +15,7 @@ com.sun.jini.reggie { private static exportIpAddr = - NicUtil.getIpAddress("default.nic", null, true, true); + NicUtil.getIpAddress("default.nic", "default", true); private static exportPort = Integer.parseInt("0"); private static serverILFactory = Modified: trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config 2010-07-26 15:13:46 UTC (rev 3289) +++ trunk/bigdata-jini/src/test/com/bigdata/jini/start/testStartJini.config 2010-07-26 16:12:45 UTC (rev 3290) @@ -15,7 +15,7 @@ private static appHome = System.getProperty("app.home", ConfigMath.getAbsolutePath(new File(".")) ); private static codebaseHost = - NicUtil.getIpAddress("default.nic", null, true, true); + NicUtil.getIpAddress("default.nic", "default", true); private static codebasePort = "23334"; private static defaultCodebaseRootDir = ConfigUtil.concat( new String[] { appHome, "${/}bigdata-jini${/}lib${/}jini${/}lib-dl" } ); Modified: trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config 2010-07-26 15:13:46 UTC (rev 3289) +++ trunk/bigdata-jini/src/test/com/bigdata/zookeeper/testzoo.config 2010-07-26 16:12:45 UTC (rev 3290) @@ -34,7 +34,7 @@ private static appHome = System.getProperty("app.home", ConfigMath.getAbsolutePath(new File(".")) ); private static localIpAddr = - NicUtil.getIpAddress("default.nic", null, true, true); + NicUtil.getIpAddress("default.nic", "default", true); /* A comma delimited list of the known zookeeper servers together * with their assigned "myid". Modified: trunk/src/resources/config/jini/reggie.config =================================================================== --- trunk/src/resources/config/jini/reggie.config 2010-07-26 15:13:46 UTC (rev 3289) +++ trunk/src/resources/config/jini/reggie.config 2010-07-26 16:12:45 UTC (rev 3290) @@ -14,7 +14,8 @@ com.sun.jini.reggie { - private static exportIpAddr = NicUtil.getIpAddress(null, null, true, true); + private static exportIpAddr = + NicUtil.getIpAddress("default.nic", "default", false); private static exportPort = Integer.parseInt( System.getProperty("exportPort", "0") ); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
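The practical upshot of the NicUtil changes above is that callers move from the old four-argument getIpAddress(systemPropertyName, defaultNic, fallbackOk, loopbackOk) overload to a three-argument form in which the fallback behavior is requested by passing the literal string "default" for the defaultNic parameter, exactly as the updated Jini and Zookeeper configuration files now do. A minimal sketch of the new calling convention follows; the class name, the eth0 interface name, and the -Ddefault.nic setting are illustrative only and are not part of the commit.

    import java.io.IOException;
    import java.net.SocketException;

    import com.bigdata.util.config.NicUtil;

    public class NicUtilExample {

        public static void main(String[] args) throws SocketException, IOException {

            // Old form (through r3255): the caller passed separate fallbackOk and
            // loopbackOk flags, e.g. NicUtil.getIpAddress("default.nic", null, true, true).

            // New form (r3256 and later): if the JVM was started with a system
            // property such as -Ddefault.nic=eth0, the address of that interface is
            // returned; otherwise the literal defaultNic value "default" asks for the
            // first reachable IPv4 address on the node, and the final argument says
            // whether a loopback address may be returned as a last resort.
            final String addr = NicUtil.getIpAddress("default.nic", "default", true);

            System.out.println("service address = " + addr);
        }
    }

Passing a concrete interface name (for example "eth1") instead of "default" restricts the lookup to that interface, matching the behavior described in the revised javadoc.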
From: <tho...@us...> - 2010-07-27 15:08:39
|
Revision: 3305 http://bigdata.svn.sourceforge.net/bigdata/?rev=3305&view=rev Author: thompsonbry Date: 2010-07-27 15:08:28 +0000 (Tue, 27 Jul 2010) Log Message: ----------- Merged r2632 through r3304 from LEXICON_REFACTOR_BRANCH into trunk. The LEXICON_REFACTOR_BRANCH is now closed. This commit introduces support for inlining xsd numerics in the statement indices. That physical schema change breaks binary compatibility for the triple store. Since this change touches every tuple in the statement indices, the migration path is to export and import your data. Modified Paths: -------------- trunk/.classpath trunk/bigdata/src/java/com/bigdata/LRUNexus.java trunk/bigdata/src/java/com/bigdata/btree/AbstractBTree.java trunk/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java trunk/bigdata/src/java/com/bigdata/btree/IndexSegment.java trunk/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java trunk/bigdata/src/java/com/bigdata/btree/IndexSegmentStore.java trunk/bigdata/src/java/com/bigdata/btree/NodeSerializer.java trunk/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilder.java trunk/bigdata/src/java/com/bigdata/btree/keys/KeyBuilder.java trunk/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java trunk/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java trunk/bigdata/src/java/com/bigdata/io/DirectBufferPool.java trunk/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java trunk/bigdata/src/java/com/bigdata/journal/AbstractJournal.java trunk/bigdata/src/java/com/bigdata/journal/BufferMode.java trunk/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java trunk/bigdata/src/java/com/bigdata/journal/FileMetadata.java trunk/bigdata/src/java/com/bigdata/journal/Options.java trunk/bigdata/src/java/com/bigdata/mdi/IndexPartitionCause.java trunk/bigdata/src/java/com/bigdata/relation/IRelation.java trunk/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraint.java trunk/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java trunk/bigdata/src/java/com/bigdata/relation/rule/IPredicate.java trunk/bigdata/src/java/com/bigdata/relation/rule/Rule.java trunk/bigdata/src/java/com/bigdata/relation/rule/Var.java trunk/bigdata/src/java/com/bigdata/relation/rule/eval/DefaultRuleTaskFactory.java trunk/bigdata/src/java/com/bigdata/relation/rule/eval/RuleState.java trunk/bigdata/src/java/com/bigdata/relation/rule/eval/pipeline/JoinTask.java trunk/bigdata/src/java/com/bigdata/relation/rule/eval/pipeline/LocalJoinTask.java trunk/bigdata/src/java/com/bigdata/service/ndx/RawDataServiceTupleIterator.java trunk/bigdata/src/java/com/bigdata/service/proxy/RemoteAsynchronousIteratorImpl.java trunk/bigdata/src/java/com/bigdata/service/proxy/RemoteChunk.java trunk/bigdata/src/java/com/bigdata/sparse/AbstractAtomicRowReadOrWrite.java trunk/bigdata/src/java/com/bigdata/sparse/SparseRowStore.java trunk/bigdata/src/java/com/bigdata/striterator/IKeyOrder.java trunk/bigdata/src/resources/logging/log4j.properties trunk/bigdata/src/test/com/bigdata/btree/AbstractBTreeTestCase.java trunk/bigdata/src/test/com/bigdata/btree/AbstractIndexSegmentTestCase.java trunk/bigdata/src/test/com/bigdata/btree/TestAll_IndexSegment.java trunk/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithLargeTrees.java trunk/bigdata/src/test/com/bigdata/btree/keys/TestKeyBuilder.java trunk/bigdata/src/test/com/bigdata/relation/accesspath/TestSameVariableConstraint.java 
trunk/bigdata/src/test/com/bigdata/relation/rule/AbstractRuleTestCase.java trunk/bigdata/src/test/com/bigdata/relation/rule/TestRule.java trunk/bigdata/src/test/com/bigdata/search/TestAll.java trunk/bigdata/src/test/com/bigdata/search/TestSearchRestartSafe.java trunk/bigdata/src/test/com/bigdata/sparse/TestAll.java trunk/bigdata-bsbm/src/resources/bsbm-data/queries/query4.txt trunk/bigdata-rdf/src/java/com/bigdata/rdf/axioms/Axioms.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/axioms/BaseAxioms.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsIterator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesIterator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesPIterator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesPOIterator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesSPIterator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesSPOIterator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/FullyBufferedJustificationIterator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/Justification.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/OwlSameAsPropertiesExpandingIterator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/RdfTypeRdfsResourceFilter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/BigdataRDFFullTextIndex.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/ITermIdFilter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/ITermIndexCodes.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Id2TermTupleSerializer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Id2TermWriteProc.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/KVOTermIdComparator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconKeyBuilder.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/ReverseIndexWriterTask.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdTupleSerializer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteProc.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteTask.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/VerifyStatementBuffer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/IMagicTuple.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/IRISUtils.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicAccessPath.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicIndexWriteProc.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicKeyOrder.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicPredicate.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicRelation.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicTuple.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicTupleSerializer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BNodeContextFactory.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNodeImpl.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataLiteralImpl.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataResourceImpl.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java 
trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataURIImpl.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValue.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactory.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactoryImpl.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueIdComparator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueImpl.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/StatementEnum.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/TermIdComparator2.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/AbstractRuleDistinctTermScan.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/AbstractRuleFastClosure_11_13.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/AbstractRuleFastClosure_3_5_6_7_9.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/AbstractRuleFastClosure_5_6_7_9.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/BackchainAccessPath.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/DoNotAddFilter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/FastClosure.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/MatchRule.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/RDFJoinNexus.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/RuleFastClosure3.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/DefaultGraphSolutionExpander.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/DistinctMultiTermAdvancer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/DistinctSPOIterator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/DistinctTermAdvancer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ExplicitSPOFilter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/InGraphBinarySearchFilter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/InGraphHashSetFilter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/InferredSPOFilter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/JustificationRemover.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/JustificationTupleSerializer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/NamedGraphSolutionExpander.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/NoAxiomFilter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/OSPComparator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/POSComparator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOAccessPath.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOComparator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOFilter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriteProc.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOKeyOrder.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOPredicate.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPORelation.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOSortKeyBuilder.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOStarJoin.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOTupleSerializer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataSolutionResolverator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIteratorImpl.java 
trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataValueIteratorImpl.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/IRawTripleStore.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/ITripleStore.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/ScaleOutTripleStore.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/TripleStoreUtility.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/vocab/BaseVocabulary.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/vocab/Vocabulary.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/axioms/TestAxioms.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestAddTerms.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestComparators.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestCompletionScan.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestFullTextIndex.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestId2TermTupleSerializer.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestTerm2IdTupleSerializer.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestVocabulary.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/magic/TestIRIS.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/magic/TestMagicKeyOrderStrategy.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/magic/TestMagicStore.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/model/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestRDFXMLInterchangeWithStatementIdentifiers.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestBackchainOwlSameAsPropertiesIterator.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestBackchainTypeResourceIterator.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestDatabaseAtOnceClosure.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestDistinctTermScan.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestJustifications.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestOptionals.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestOwlSameAsPropertiesExpandingIterator.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestRuleExpansion.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestRuleFastClosure_11_13.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestRuleFastClosure_3_5_6_7_9.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestSlice.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestTruthMaintenance.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestDefaultGraphAccessPath.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPO.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOAccessPath.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOKeyCoders.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOKeyOrder.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOPredicate.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPORelation.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOStarJoin.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOTupleSerializer.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOValueCoders.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/AbstractDistributedTripleStoreTestCase.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/AbstractEmbeddedTripleStoreTestCase.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/AbstractTestCase.java 
trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestBulkFilter.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreTransactionSemantics.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestRestartSafe.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestStatementIdentifiers.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestTripleStore.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestTripleStoreBasics.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataConstructIterator.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStatistics.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl2.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailHelper.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/FreeTextSearchExpander.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/HitConvertor.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestAll.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEvaluationStrategyImpl.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestJoinScope.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNamedGraphs.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestQuery.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSetBinding.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest2.java trunk/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java trunk/bigdata-sails/src/test/org/openrdf/query/parser/sparql/SPARQLQueryTest.java Added Paths: ----------- trunk/bigdata/src/java/com/bigdata/btree/IndexSegmentMultiBlockIterator.java trunk/bigdata/src/test/com/bigdata/btree/TestIndexSegmentMultiBlockIterators.java trunk/bigdata-bsbm/src/resources/bsbm-data/queries/query4-rewritten.txt trunk/bigdata-bsbm/src/test/ trunk/bigdata-bsbm/src/test/benchmark/ trunk/bigdata-bsbm/src/test/benchmark/bigdata/ trunk/bigdata-bsbm/src/test/benchmark/bigdata/TestBSBM.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/ trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/AbstractBNodeIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/AbstractIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/AbstractInlineIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/AbstractLiteralIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/BlobOverflowHandler.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/DTE.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/DTEFlags.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/DefaultExtensionFactory.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/DummyIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/ExtensionIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IDatatypeURIResolver.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IExtension.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IExtensionFactory.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/ILexiconConfiguration.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 
trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LegacyTermIdUtility.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/NullIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/NumericBNodeIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/TermId.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/UUIDBNodeIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/UUIDLiteralIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/VTE.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSD.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDBooleanIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDByteIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDDecimalIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDDoubleIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDFloatIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDIntIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDIntegerIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDLongIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDShortIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AbstractInlineConstraint.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineEQ.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGE.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGT.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLE.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLT.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineNE.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/package.html trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/TermIVComparator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/relation/ trunk/bigdata-rdf/src/java/com/bigdata/rdf/relation/rule/ trunk/bigdata-rdf/src/java/com/bigdata/rdf/relation/rule/BindingSetSortKeyBuilder.java trunk/bigdata-rdf/src/samples/ trunk/bigdata-rdf/src/samples/com/ trunk/bigdata-rdf/src/samples/com/bigdata/ trunk/bigdata-rdf/src/samples/com/bigdata/rdf/ trunk/bigdata-rdf/src/samples/com/bigdata/rdf/internal/ trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/ trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/ColorsEnumExtension.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/EpochExtension.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/SampleExtensionFactory.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestEncodeDecodeKeys.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestLongLiterals.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/ trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestInlineConstraints.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestInlining.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/magic/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/model/TestFactory.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/relation/ trunk/bigdata-rdf/src/test/com/bigdata/rdf/relation/rule/ trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreWithoutInlining.java 
trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoinsWithoutInlining.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithSidsWithoutInlining.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestInlineValues.java Removed Paths: ------------- trunk/bigdata/src/architecture/scale-out-perf.txt trunk/bigdata/src/java/com/bigdata/mdi/ReadOnlyMetadataIndexView.java trunk/bigdata/src/java/com/bigdata/relation/rule/BindingSetSortKeyBuilder.java trunk/bigdata/src/java/com/bigdata/service/LocalDataServiceClient.java trunk/bigdata/src/java/com/bigdata/service/LocalDataServiceFederation.java trunk/bigdata/src/java/com/bigdata/service/ndx/DataServiceIndex.java trunk/bigdata/src/test/com/bigdata/service/TestLDS.java trunk/bigdata-bsbm/src/test/benchmark/ trunk/bigdata-bsbm/src/test/benchmark/bigdata/ trunk/bigdata-bsbm/src/test/benchmark/bigdata/TestBSBM.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/VarOrId.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/AbstractBNodeIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/AbstractIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/AbstractInlineIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/AbstractLiteralIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/BlobOverflowHandler.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/DTE.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/DTEFlags.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/DefaultExtensionFactory.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/DummyIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/ExtensionIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IDatatypeURIResolver.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IExtension.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IExtensionFactory.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/ILexiconConfiguration.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LegacyTermIdUtility.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/NullIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/NumericBNodeIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/TermId.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/UUIDBNodeIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/UUIDLiteralIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/VTE.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSD.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDBooleanIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDByteIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDDecimalIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDDoubleIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDFloatIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDIntIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDIntegerIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDLongIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDShortIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AbstractInlineConstraint.java 
trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineEQ.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGE.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGT.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLE.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLT.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineNE.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/package.html trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/TermIdComparator.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/relation/rule/ trunk/bigdata-rdf/src/java/com/bigdata/rdf/relation/rule/BindingSetSortKeyBuilder.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/FastRDFValueCoder.java trunk/bigdata-rdf/src/samples/com/ trunk/bigdata-rdf/src/samples/com/bigdata/ trunk/bigdata-rdf/src/samples/com/bigdata/rdf/ trunk/bigdata-rdf/src/samples/com/bigdata/rdf/internal/ trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/ColorsEnumExtension.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/EpochExtension.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/SampleExtensionFactory.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestEncodeDecodeKeys.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestLongLiterals.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/ trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestInlineConstraints.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/relation/rule/ trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/AbstractLocalDataServiceFederationTestCase.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreWORM.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithLocalDataServiceFederation.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestTripleStoreLoadRateLocal.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestTripleStoreLoadRateWithEmbeddedFederation.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestTripleStoreLoadRateWithExistingJiniFederation.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestTripleStoreLoadRateWithJiniFederation.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestTripleStoreLoadRateWithLocalDataServiceFederation.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java Property Changed: ---------------- trunk/ trunk/bigdata-jini/src/java/com/bigdata/attr/ trunk/bigdata-jini/src/java/com/bigdata/disco/ trunk/bigdata-jini/src/java/com/bigdata/disco/config/ trunk/bigdata-jini/src/java/com/bigdata/util/config/ trunk/bigdata-lubm/lib/ trunk/bigdata-lubm/src/resources/ trunk/bigdata-lubm/src/resources/answers (U1)/ trunk/bigdata-lubm/src/resources/config/ trunk/bigdata-lubm/src/resources/logging/ trunk/bigdata-lubm/src/resources/scripts/ trunk/bigdata-perf/ trunk/bigdata-perf/btc/ trunk/bigdata-perf/btc/src/ trunk/bigdata-perf/btc/src/resources/ trunk/bigdata-perf/btc/src/resources/logging/ trunk/bigdata-perf/uniprot/ trunk/bigdata-perf/uniprot/src/ trunk/bigdata-perf/uniprot/src/resources/ trunk/bigdata-perf/uniprot/src/resources/logging/ trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ 
trunk/dsi-utils/ trunk/dsi-utils/LEGAL/ trunk/dsi-utils/lib/ trunk/dsi-utils/src/ trunk/dsi-utils/src/java/ trunk/dsi-utils/src/java/it/ trunk/dsi-utils/src/java/it/unimi/ trunk/dsi-utils/src/java/it/unimi/dsi/ trunk/dsi-utils/src/java/it/unimi/dsi/compression/ trunk/dsi-utils/src/java/it/unimi/dsi/io/ trunk/dsi-utils/src/java/it/unimi/dsi/util/ trunk/dsi-utils/src/test/ trunk/dsi-utils/src/test/it/ trunk/dsi-utils/src/test/it/unimi/ trunk/dsi-utils/src/test/it/unimi/dsi/ trunk/dsi-utils/src/test/it/unimi/dsi/io/ trunk/dsi-utils/src/test/it/unimi/dsi/util/ trunk/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ trunk/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ trunk/osgi/ trunk/src/resources/bin/config/ Property changes on: trunk ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 + /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 Modified: trunk/.classpath =================================================================== --- trunk/.classpath 2010-07-27 13:16:22 UTC (rev 3304) +++ trunk/.classpath 2010-07-27 15:08:28 UTC (rev 3305) @@ -1,67 +1,69 @@ <?xml version="1.0" encoding="UTF-8"?> <classpath> - <classpathentry kind="src" path="bigdata-rdf/src/java"/> - <classpathentry kind="src" path="dsi-utils/src/java"/> - <classpathentry kind="src" path="bigdata-bsbm/src/java"/> - <classpathentry kind="src" path="bigdata/src/resources/logging"/> - <classpathentry kind="src" path="bigdata-sails/src/samples"/> - <classpathentry kind="src" path="bigdata-jini/src/test"/> - <classpathentry kind="src" path="bigdata-sails/src/java"/> - <classpathentry kind="src" path="bigdata/src/java"/> - <classpathentry kind="src" path="bigdata-rdf/src/test"/> - <classpathentry kind="src" path="bigdata/src/test"/> - <classpathentry kind="src" path="bigdata-sails/src/test"/> - <classpathentry kind="src" path="bigdata-jini/src/java"/> - <classpathentry kind="src" path="bigdata-lubm/src/java"/> - <classpathentry kind="src" path="contrib/src/problems"/> - <classpathentry kind="src" path="bigdata/src/samples"/> - <classpathentry kind="src" path="dsi-utils/src/test"/> - <classpathentry kind="lib" path="bigdata-jini/lib/apache/zookeeper-3.2.1.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/commons-httpclient.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/servlet-api.jar"/> - <classpathentry kind="lib" path="bigdata/lib/dsi-utils-1.0.6-020610.jar"/> - <classpathentry kind="lib" path="bigdata/lib/lgpl-utils-1.0.6-020610.jar"/> - <classpathentry kind="lib" path="bigdata-rdf/lib/nxparser-6-22-2010.jar"/> - <classpathentry kind="src" path="lgpl-utils/src/java"/> - <classpathentry kind="src" path="lgpl-utils/src/test"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/icu/icu4j-3_6.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/unimi/colt-1.2.0.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/ctc_utils-5-4-2005.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/cweb-commons-1.1-b2-dev.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/cweb-extser-0.1-b2-dev.jar"/> - <classpathentry exported="true" kind="lib" 
path="bigdata/lib/cweb-junit-ext-1.1-b3-dev.jar" sourcepath="/junit-ext/src"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/junit-3.8.1.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/apache/log4j-1.2.15.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/icu/icu4jni.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/slf4j-api-1.4.3.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/slf4j-log4j12-1.4.3.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/iris-0.58.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/jgrapht-jdk1.5-0.7.1.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/browser.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/classserver.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/fiddler.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/jini-core.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/jini-ext.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/jsk-lib.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/jsk-platform.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/jsk-resources.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/mahalo.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/mercury.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/norm.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/outrigger.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/reggie.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/start.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/sun-util.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/tools.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/unimi/fastutil-5.1.5.jar"/> - <classpathentry kind="lib" path="bigdata-bsbm/lib/jdom.jar"/> - <classpathentry kind="lib" path="bigdata-bsbm/lib/ssj.jar"/> - <classpathentry kind="lib" path="bigdata/lib/lucene/lucene-analyzers-3.0.0.jar"/> - <classpathentry kind="lib" path="bigdata/lib/lucene/lucene-core-3.0.0.jar"/> - <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/> - <classpathentry kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.3.0-onejar.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.3.0.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.3.0.jar"/> - <classpathentry kind="lib" path="bigdata/lib/high-scale-lib-v1.1.2.jar"/> - <classpathentry kind="output" path="bin"/> + <classpathentry path="bigdata-rdf/src/java" kind="src"/> + <classpathentry path="bigdata-rdf/src/samples" kind="src"/> + <classpathentry path="bigdata-bsbm/src/test" kind="src"/> + <classpathentry path="dsi-utils/src/java" kind="src"/> + <classpathentry path="bigdata-bsbm/src/java" kind="src"/> + <classpathentry path="bigdata/src/resources/logging" kind="src"/> + <classpathentry path="bigdata-sails/src/samples" kind="src"/> + <classpathentry path="bigdata-jini/src/test" kind="src"/> + <classpathentry 
path="bigdata-sails/src/java" kind="src"/> + <classpathentry path="bigdata/src/java" kind="src"/> + <classpathentry path="bigdata-rdf/src/test" kind="src"/> + <classpathentry path="bigdata/src/test" kind="src"/> + <classpathentry path="bigdata-sails/src/test" kind="src"/> + <classpathentry path="bigdata-jini/src/java" kind="src"/> + <classpathentry path="bigdata-lubm/src/java" kind="src"/> + <classpathentry path="contrib/src/problems" kind="src"/> + <classpathentry path="bigdata/src/samples" kind="src"/> + <classpathentry path="dsi-utils/src/test" kind="src"/> + <classpathentry path="bigdata-jini/lib/apache/zookeeper-3.2.1.jar" kind="lib"/> + <classpathentry path="bigdata-sails/lib/commons-httpclient.jar" kind="lib"/> + <classpathentry path="bigdata-sails/lib/servlet-api.jar" kind="lib"/> + <classpathentry path="bigdata/lib/dsi-utils-1.0.6-020610.jar" kind="lib"/> + <classpathentry path="bigdata/lib/lgpl-utils-1.0.6-020610.jar" kind="lib"/> + <classpathentry path="bigdata-rdf/lib/nxparser-6-22-2010.jar" kind="lib"/> + <classpathentry path="lgpl-utils/src/java" kind="src"/> + <classpathentry path="lgpl-utils/src/test" kind="src"/> + <classpathentry path="bigdata/lib/icu/icu4j-3_6.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata/lib/unimi/colt-1.2.0.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata/lib/ctc_utils-5-4-2005.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata/lib/cweb-commons-1.1-b2-dev.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata/lib/cweb-extser-0.1-b2-dev.jar" exported="true" kind="lib"/> + <classpathentry sourcepath="/junit-ext/src" path="bigdata/lib/cweb-junit-ext-1.1-b3-dev.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata/lib/junit-3.8.1.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata/lib/apache/log4j-1.2.15.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata/lib/icu/icu4jni.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-rdf/lib/slf4j-api-1.4.3.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-rdf/lib/slf4j-log4j12-1.4.3.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-rdf/lib/iris-0.58.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-rdf/lib/jgrapht-jdk1.5-0.7.1.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/browser.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/classserver.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/fiddler.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/jini-core.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/jini-ext.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/jsk-lib.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/jsk-platform.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/jsk-resources.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/mahalo.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/mercury.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/norm.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/outrigger.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/reggie.jar" exported="true" kind="lib"/> + 
<classpathentry path="bigdata-jini/lib/jini/lib/start.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/sun-util.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-jini/lib/jini/lib/tools.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata/lib/unimi/fastutil-5.1.5.jar" exported="true" kind="lib"/> + <classpathentry path="bigdata-bsbm/lib/jdom.jar" kind="lib"/> + <classpathentry path="bigdata-bsbm/lib/ssj.jar" kind="lib"/> + <classpathentry path="bigdata/lib/lucene/lucene-analyzers-3.0.0.jar" kind="lib"/> + <classpathentry path="bigdata/lib/lucene/lucene-core-3.0.0.jar" kind="lib"/> + <classpathentry path="org.eclipse.jdt.launching.JRE_CONTAINER" kind="con"/> + <classpathentry path="bigdata-rdf/lib/openrdf-sesame-2.3.0-onejar.jar" kind="lib"/> + <classpathentry path="bigdata-sails/lib/sesame-sparql-testsuite-2.3.0.jar" kind="lib"/> + <classpathentry path="bigdata-sails/lib/sesame-store-testsuite-2.3.0.jar" kind="lib"/> + <classpathentry path="bigdata/lib/high-scale-lib-v1.1.2.jar" kind="lib"/> + <classpathentry path="bin" kind="output"/> </classpath> Deleted: trunk/bigdata/src/architecture/scale-out-perf.txt =================================================================== --- trunk/bigdata/src/architecture/scale-out-perf.txt 2010-07-27 13:16:22 UTC (rev 3304) +++ trunk/bigdata/src/architecture/scale-out-perf.txt 2010-07-27 15:08:28 UTC (rev 3305) @@ -1,3599 +0,0 @@ -Notes on various things. Mostly free form and in temporal order, -with occasional efforts to collate by topic. - -============================================================ - -Notes on store level record checksums and record compression. - -1. many record compression schemes will fail if the data are corrupt, - but logically you compress first and then checksum the record. - - compression is often a technique using a stream of blocks. - - checksum is a streaming technique. - - // IRawStore#write() - write(ByteBuffer b) : addr - - // AbstractJournal#write() - - if(compress) { - - b = compress(b) - - } - - int chksum; - if(useChecksum) { - - bytesRequired = b.remaining() + 4; - - chmsum = computeChecksum( b ); - - } else bytesRequired = b.remaining(); - - bufferStrategy.write(b,chksum,useChecksum) - - Note: buffer strategy write() probably needs to have the checksum - value pass along in addition to the record to avoid re-allocation - of the ByteBuffer just to tack on the additional 4 bytes. We could - either always write those additional 4 bytes or optionally write - them if checksums are enabled. - -2. the root block needs to hold the critical data indicating whether - or not checksums in use and what record compression technique, if - any, to apply. We need this on hand before we can either read or - write a record on the store. - -3. we need 4 bytes (int32) for the checksum. this should be at the - end of the record, so the size in the store is extended by 4 bytes - and the address for the record on the store is adjusted to also - include those 4 bytes. However, when you read from the store it - will give you a slice WITHOUT those four bytes. Further, if it is - using compression then it will decompress the slice, resulting in a - new slice that can be much larger than the record on the store - whose size is encoded within the address. This will probably break - a variety of asserts that assume that the returned ByteBuffer will - be exactly the size of the byte count encoded in the address. - -4. 
Compression should run on the byte[] not on the slower ByteBuffer. - Serialization generally writes on a byte[], but sometimes that is - wrapped up as a ByteBuffer - and it can even be a slice() onto a - larger array (NodeSerializer does this since it returns a view onto - an internal buffer). - - /** - * The {@link Adler32} checksum. This is an int32 value, even through the - * {@link Checksum} API returns an int64 (aka long integer) value. The - * actual checksum is in the lower 32 bit. - */ - static final int SIZEOF_CHECKSUM = Bytes.SIZEOF_INT; - - /** - * Offset of the int32 value that is the {@link Adler32} checksum of the - * serialized node or leaf. The checksum is computed for all bytes exclusing - * the first 4 bytes, on which the value of the computed checksum is - * written. - */ - static final int OFFSET_CHECKSUM = 0; - - /** - * When <code>true</code>, checksums will be generated for serialized - * nodes and leaves and verified on read. Checksums provide a check for - * corrupt media and make the database more robust at the expense of some - * added cost to compute a validate the checksums. - * <p> - * Computing the checksum is ~ 40% of the cost of (de-)serialization. - * <p> - * When the backing store is fully buffered (it is entirely in RAM) then - * checksums are automatically disabled. - * - * @deprecated See {@link #setUseChecksum(boolean)} - */ - public final boolean getUseChecksum() {return useChecksum;} - -============================================================ -Consider block storage / NAS deployment scenario. - - In this scenario we use robust remote storage with a large block - size (64M+). The backing store could be HFS or NAS. Only large - chunks are read at a time. Index store writes are also large - chunks. Commit point writes will require partial chunk updates - unless we stream to the failover services at the commit point and - only write to the remote store when we close the journal for writes. - - Use a direct buffer for the live journal, no write cache. - - Use resource lock manager (ala zookeeper) to allocate journal files - on the block store. Commit points write through so that commits are - visible to failover services (or are streamed through to those - services). The initial allocation is for the entire target extent. - Avoid the need to extent by eager overflow as coordinated with the - write executor service. - - Use resource lock manager when obtaining an historical journal or - index segment store. Copy the entire thing to local disk and manage - the data locally that will be used by the server. - -============================================================ -Consider a RW store variant. - - A read-write store would be useful for scale-up deployments where - access to historical commit points is not required. If you require - access to historical commit points, then a read-write store will not - satisify since it will release the storage associated with deleted - records no later than the commit point. - - Only the WORM allows access to those historical commit points. This - places a RW store somewhat at odds with the rest of the bigdata - architecture since we presume that operations read based on a commit - time and that the commit time can be mapped onto a commit point. - - Regardless, a read-write store would use bit maps for allocations. - The handles to the bit maps could be stored in the root block addrs. - Deletes would be automatic for B+Tree nodes and leaves and in the - hands of the application for low-level record writes. 
- - Each allocation block would handle records in a certain size range. - E.g., LT 512, LT 1024, LT 4096, etc. The block sizes will tend to - be fairly large since records are nearly always B+Tree nodes or - leaves. (It may be possible to dynamically determine the best set - of block sizes). - - For a given block size, we probably need a chain of allocation - blocks. This is necessary to handle on the one handle widely - varying #s of allocations by block size and on the other to not - place a limit on how many allocations there may be of a given block - size. This also lets us keep down the size of the allocation block - and hence the amount of metadata that needs to be written on each - commit. - - The root block could just hold the addr of the allocation block - chains record. - - Is it possible to efficiently persist only the incremental change to - a bit vector? If the changes are from a specific offset on? - -============================================================ - - - Tune indices - - - The ids index should benefit from value compression since the - values are the serialized terms. This will require custom - code to break the values into symbols and then use huffman - encoding. Alternatively, simply treat each value as a symbol - and code from that (assuming that value reuse is common - if - not then at least URIs can be broken down into common - symbols). - - - The terms (term:id) index is on the order of 5x larger than - the ids (id:term) index. Presumably this is because updates - are distributed more or less randomly across the terms index - as new terms become defined but are strictly append only for - the ids index since new ids are always larger than old ids. - - - A larger branching factor may benefit the ids index. - - - A compacting merge of the terms index should greatly reduce - its size. - - - Splitting the terms2ids index will allocate more resources - and greater parallelism to that index. - - - Prefix compression may reduce the problem (it is enabled as - of 10/1/08). - - - Nearly ALL _read_ time between the SPO and TERMS index is - reading the TERMS index (99%). - - - Nearly ALL _write_ time between the SPO and the TERMS index - is writing the SPO index (99%). Statements are more likely - to be distinct than terms, so it makes sense that we write - on the statement index more often. However, note that this - is true even though the TERMS index is 3x larger than the - SPO index. - - - BTree - - - Modify to allow a purely transient BTree using ONLY hard - references. This variant does not require a backing store. - The parent and child references should be HARD - references. Deleted nodes and leaves will be GC'd once they are - evicted from the write retention queue. - - - Per-child locks : try using the childRef for this. Add - synchronized(node) to ensure that the childRef[i] is non-null - and then synchronized(childRef[i]) for the lock itself. - - - The RecordCompressor as utilized by the NodeSerializer is NOT - thread-safe as it relies on a single cbuf field. Either the - static buffer pool (if direct buffers are performant for this), - a heap buffer pool, dynamic heap allocations for - (de-)compression, or a serialized access to an instance per - NodeSerializer instance (and hence per BTree instance). - - - Change checksums to be at the store/record level. Interpret - the record length as having 2 additional bytes for read/write - of the checksum. Put it at the end of the record. - Enable/disable at the store level. 
- - Add an option for read-back validation of writes? - - Add an option for a fully synchronized raw store interface on - the Journal? - - - IAutoboxBTree - - - Write tests of the autobox API for BTree, FusedView, - ClientIndexView, and DataServiceIndex. - - - Should be able to instantiate a resource that is a BigdataMap - or BigdataSet, so perhaps make these classes extend - AbstractResource? Same for SparseRowStore? - - - Need [isNull] for ClientIndexView and DataServiceIndex impls - to reconstruct the object by allowing reconstruction of the - ITuple. - - Could modify the ResultBuffer to provide this additional - information as an option and specify an appropriate - constructor for the point test to get back that metadata. - - Really, should define crudTuple() methods and rework the - batch crud methods in terms of tuples. That is the general - framwork. Bit flags can be used to indicate that certain - information (keys, vals, etc). are not required for a given - op. (keys are always available on the client for these ops - so there is never a need to send them with the data: just - {val, isNull, isDeleted} and the option to read deleted - tuples. - - BigdataMap and BigdataSet will not scale-out until this issue - is resolved. - -============================================================ -Lock contention - -There are two places where we get a modest amount of lock contention, -based on -Djrockit.lockprofiling=true. - - - Node#getChild(int) - - This is the gateway for access to child nodes. It currently uses - double-checked locking to avoid synchronization when the desired - child is already available. If the child needs to be read from the - disk then the threads are synchronized in order to prevent - concurrent requests from reading the same record, deserializing the - node or leaf, and then entering different references for that node - or leaf into the childRefs[]. - - We could allow concurrent resolution of a child that is not in - memory if we used an AtomicReference[] and stored the weak - references in that and using an atomic operation to set the - reference iff it was not already set. This would require code - changes everywhere we use childRefs[] and it is a long shot that - this would improve performance at all. - - - DiskOnlyStrategy#reopenChannel(). Contention here arises when an - interrupt is recognized during an IO operation and the file channel - is closed. Readers will transparently re-open the channel, but - they all want to do so at once which causes contention for this - method. I don't see anyway to improve on this off hand. - -============================================================ -Sparse Row Store (SRS) - - - Extract IRowStore interface. - - - Optimize get(Schema,name) in SparseRowStore and - AtomicRowWriteRead. - - - AutoIncrement semantics: - - - You can't use an autoinc counter for a primary key. However, - this could be done if we use the maximum value for the auto-inc - counter to form the fromKey, which would direct the procedure to - the correct index partition. We would need to notice that the - property value was an auto-increment counter and the fromKey - would then have to be regenerated on the index partition. We - would then lookup the current value for that counter and write - its successor into the row store. - - The inability to use auto-inc for the primary key is really - quite limiting. - - - Since we have all these successor semantics, we should be able - to support auto-increment for more than just int and long. 
A - better way to form the successor is to pass along an - IAutoIncrement method and let it form the successor. That - allows all kinds of interesting patterns. - - - JSON API - - - Add a web application that let's people easily write on or read - from the sparse row store using JSON or the like. - - - It should be easy to view and modify the data in the global row - store, which is where locatable resources store their - configuration metadata. - - - The JSON API should be compatible to the extent possible with - HBASE and GAE. - - - This web application will have to be distributed in order to - eliminate bottlenecks. One approach is to re-direct HTTP - clients to an embedded web service co-located with the data - service on which the row resides. HTTP clients can continue to - address that server until they receive a redirect. - - - (On reflection, I have decided NOT to go this route.) Add a BLOB - reference column type. There are at least two design options for - this. I think that we should support at least (1) and (2). - - (1) the blocks are stored locally and hence are always - available from the same data service as the BLOB reference - column value - this might limit the maximum effective blob - size (in bytes) since the data will have to fit in the same - index partition and hence be co-located on a single host. - In fact, the blocks will be in the same index segment as - the column value once the journal overflows. One advantage - of this approach is the block updates can be atomic with - block metadata updates - a feature that is not otherwise - available without full transactions. - - (2) the blob reference includes the name of the scale-out index - on which the blocks for that blob are stored - in this - model the blocks can reside anywhere and splits of the - blocks will have no relevance to splits of the metadata. - This also makes it easier to locate the partitions of the - index containing the blocks on data services that are - specifically provisioned for large data blocks. - - (3) the blob reference contains the primary key for the blob - and the blob is stored either in the same index or in - another index. I am not sure that this variant adds value - over (1) and (2). - - - Refactor the BigdataRepository to use the BLOB reference type. - -============================================================ -Bigdata File System (BFS) - - - (***) Test failures remain for TestFileMetadataIndex. It seems as - if the logic was bad previously. If we delete the file Version - then how can we increment it and get a different answer? Maybe we - should delete the Id since that would leave the version alone? - No, that does not work since all property values will be - overwritten. How about either writing a "deleted" flag or doing - some fancy steps to find the most recent non-deleted version and - then increment that? - - Bug fix for create/delete/update interaction with Version counter - in the row store. - - - test_delete failure - - - test_createUpdate failure - - - handle overflow of blocks to the index segments during MOVE - - - provide streaming socket api on data service for reading blocks - (low level in the DiskOnlyStrategy - if in the write cache then - return directly else return buffered input stream reading on the - disk file and interrupt if journal is closed). - - - range delete - - - logical row scan for headers of documents in a key range. - - - Write test for forward and reverse scans starting at the fence - posts around a partition boundary. 
- -============================================================ -Map/Reduce demos (**) - - - Rework the map/reduce implementation to use local writes and - distributed gathers. - - - Download, prepare, extract. - - - Concurrent RDF data load as a map/reduce job. - - - Try jini federation using only the terms index to assign - consistent term identifiers, bulk loading into local SPO-only - indices, and then range partitioning the indices into global - SPO, POS, and OSP orders and bulk loading the global statement - indices. The data loader should be concurrent and a filter - should be applied such that each "host" loads only the files - that hash MOD N to that host. (note that only AddTerms and - AddIds go across the network API in this case.) - -============================================================ -Tune network IO - - - huffman encoding is appropriate for network IO, but hu-tucker is - not required since we have to decompress keys to get them - inserted into the btree. - - - tokenization needs to be specified for RDF Value types for the - purposes of compression. In fact, we are guarenteed that values - are NOT duplicated in a given batch so tokenization needs to - uncover common symbols. This is easy for URIs but less so for - literals and impossible for BNodes (which do not rea... [truncated message content] |
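The lock-contention notes above suggest replacing the double-checked locking in Node#getChild(int) with an atomic compare-and-set on the child slot, so that concurrent readers may race to materialize a child and only one result is retained. A minimal sketch of that idea in plain Java; the names are illustrative, this is not the actual BTree code, and (as the notes themselves caution) it may not improve performance.

import java.lang.ref.WeakReference;
import java.util.concurrent.atomic.AtomicReferenceArray;

// Illustrative sketch only: a CAS on the child slot replaces the synchronized
// block; losers of the race simply adopt the winner's reference.
class ChildResolutionSketch {

    // Hypothetical stand-in for the node's childRefs[] array.
    private final AtomicReferenceArray<WeakReference<Object>> childRefs;

    ChildResolutionSketch(final int branchingFactor) {
        childRefs = new AtomicReferenceArray<WeakReference<Object>>(branchingFactor);
    }

    Object getChild(final int index) {
        final WeakReference<Object> oldRef = childRefs.get(index);
        final Object cached = (oldRef == null ? null : oldRef.get());
        if (cached != null)
            return cached; // fast path: child already materialized.

        // Slow path: read and deserialize without holding a lock.
        final Object loaded = readChildFromDisk(index);
        final WeakReference<Object> newRef = new WeakReference<Object>(loaded);

        // Publish iff no other thread already installed a reference.
        if (childRefs.compareAndSet(index, oldRef, newRef))
            return loaded;

        // Another thread won the race: use its child if still reachable.
        final WeakReference<Object> winner = childRefs.get(index);
        final Object w = (winner == null ? null : winner.get());
        return (w != null ? w : loaded);
    }

    // Placeholder for the real read/deserialize logic.
    private Object readChildFromDisk(final int index) {
        return new Object();
    }
}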
From: <tho...@us...> - 2010-07-27 16:42:39
|
Revision: 3309 http://bigdata.svn.sourceforge.net/bigdata/?rev=3309&view=rev Author: thompsonbry Date: 2010-07-27 16:42:33 +0000 (Tue, 27 Jul 2010) Log Message: ----------- moved lubm into bigdata-perf.

Added Paths:
-----------
trunk/bigdata-perf/bigdata-lubm/
trunk/bigdata-perf/bsbm/
trunk/bigdata-perf/lubm/

Removed Paths:
-------------
trunk/bigdata-lubm/

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-07-27 17:04:14
|
Revision: 3312 http://bigdata.svn.sourceforge.net/bigdata/?rev=3312&view=rev Author: thompsonbry Date: 2010-07-27 17:04:08 +0000 (Tue, 27 Jul 2010) Log Message: ----------- moved bigdata-bsbm into bigdata-perf.

Added Paths:
-----------
trunk/bigdata-perf/bigdata-bsbm/

Removed Paths:
-------------
trunk/bigdata-bsbm/

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-07-27 21:30:54
|
Revision: 3331 http://bigdata.svn.sourceforge.net/bigdata/?rev=3331&view=rev Author: thompsonbry Date: 2010-07-27 21:30:48 +0000 (Tue, 27 Jul 2010) Log Message: -----------

This is a bigdata (R) snapshot release. This release is capable of loading 1B triples in under one hour on a 15 node cluster and has been used to load up to 13B triples on the same cluster. JDK 1.6 is required.

See [1] for instructions on installing bigdata(R), [2] for the javadoc and [3] and [4] for news, questions, and the latest developments. For more information about SYSTAP, LLC and bigdata, see [5].

Please note that we recommend checking out the code from SVN using the tag for this release. The code will build automatically under eclipse. You can also build the code using the ant script. The cluster installer requires the use of the ant script. You can checkout this release from the following URL:

https://bigdata.svn.sourceforge.net/svnroot/bigdata/branches/BIGDATA_RELEASE_0_83_1

New features:

- Inlining XSD numerics, xsd:boolean, or custom datatype extensions into the statement indices. Inlining provides a smaller footprint and faster queries for data using XSD numeric datatypes. In order to introduce inlining we were forced to make a change in the physical schema for the RDF database which breaks binary compatibility for existing stores. The recommended migration path is to export the data and import it into a new bigdata instance.

- Refactor of the dynamic sharding mechanism for higher performance.

- The SparseRowStore has been modified to make Unicode primary keys decodable by representing Unicode primary keys using UTF8 rather than Unicode sort keys. This change also allows the SparseRowStore to work with the JDK collator option which embeds nul bytes into Unicode sort keys. This change breaks binary compatibility, but there is an option for historical compatibility.

The roadmap for the next releases include:

- Query optimizations;

- Support for high-volume analytic query workloads and SPARQL aggregations;

- High availability for the journal and the cluster;

- Simplified deployment, configuration, and administration for clusters.

For more information, please see the following links:

[1] http://bigdata.wiki.sourceforge.net/GettingStarted
[2] http://www.bigdata.com/bigdata/docs/api/
[3] http://sourceforge.net/projects/bigdata/
[4] http://www.bigdata.com/blog
[5] http://www.systap.com/bigdata.htm

About bigdata:

Bigdata® is a horizontally-scaled, general purpose storage and computing fabric for ordered data (B+Trees), designed to operate on either a single server or a cluster of commodity hardware. Bigdata® uses dynamically partitioned key-range shards in order to remove any realistic scaling limits - in principle, bigdata® may be deployed on 10s, 100s, or even thousands of machines and new capacity may be added incrementally without requiring the full reload of all data. The bigdata® RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), and datum level provenance.

Modified Paths:
--------------
trunk/build.properties

Added Paths:
-----------
trunk/bigdata/src/releases/RELEASE_0_83_1.txt

Added: trunk/bigdata/src/releases/RELEASE_0_83_1.txt
===================================================================
--- trunk/bigdata/src/releases/RELEASE_0_83_1.txt (rev 0)
+++ trunk/bigdata/src/releases/RELEASE_0_83_1.txt 2010-07-27 21:30:48 UTC (rev 3331)
@@ -0,0 +1,62 @@
+This is a bigdata (R) snapshot release.
This release is capable of loading 1B +triples in under one hour on a 15 node cluster and has been used to load up to +13B triples on the same cluster. JDK 1.6 is required. + +See [1] for instructions on installing bigdata(R), [2] for the javadoc and [3] +and [4] for news, questions, and the latest developments. For more information +about SYSTAP, LLC and bigdata, see [5]. + +Please note that we recommend checking out the code from SVN using the tag for +this release. The code will build automatically under eclipse. You can also +build the code using the ant script. The cluster installer requires the use of +the ant script. You can checkout this release from the following URL: + + https://bigdata.svn.sourceforge.net/svnroot/bigdata/branches/BIGDATA_RELEASE_0_83_1 + +New features: + +- Inlining XSD numerics, xsd:boolean, or custom datatype extensions + into the statement indices. Inlining provides a smaller footprint + and faster queries for data using XSD numeric datatypes. In order + to introduce inlining we were forced to make a change in the + physical schema for the RDF database which breaks binary + compatibility for existing stores. The recommended migration path + is to export the data and import it into a new bigdata instance. + +- Refactor of the dynamic sharding mechanism for higher performance. + +- The SparseRowStore has been modified to make Unicode primary keys + decodable by representing Unicode primary keys using UTF8 rather + than Unicode sort keys. This change also allows the SparseRowStore + to work with the JDK collator option which embeds nul bytes into + Unicode sort keys. This change breaks binary compatibility, but + there is an option for historical compatibility. + +The roadmap for the next releases include: + +- Query optimizations; + +- Support for high-volume analytic query workloads and SPARQL aggregations; + +- High availability for the journal and the cluster; + +- Simplified deployment, configuration, and administration for clusters. + +For more information, please see the following links: + +[1] http://bigdata.wiki.sourceforge.net/GettingStarted +[2] http://www.bigdata.com/bigdata/docs/api/ +[3] http://sourceforge.net/projects/bigdata/ +[4] http://www.bigdata.com/blog +[5] http://www.systap.com/bigdata.htm + +About bigdata: + +Bigdata\xAE is a horizontally-scaled, general purpose storage and computing fabric +for ordered data (B+Trees), designed to operate on either a single server or a +cluster of commodity hardware. Bigdata\xAE uses dynamically partitioned key-range +shards in order to remove any realistic scaling limits - in principle, bigdata\xAE +may be deployed on 10s, 100s, or even thousands of machines and new capacity may +be added incrementally without requiring the full reload of all data. The bigdata\xAE +RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), +and datum level provenance. Property changes on: trunk/bigdata/src/releases/RELEASE_0_83_1.txt ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: trunk/build.properties =================================================================== --- trunk/build.properties 2010-07-27 21:28:28 UTC (rev 3330) +++ trunk/build.properties 2010-07-27 21:30:48 UTC (rev 3331) @@ -36,7 +36,7 @@ release.dir=ant-release # The build version (note: 0.82b -> 0.82.0) -build.ver=0.83.0 +build.ver=0.83.1 build.ver.osgi=0.83 # Set true to do a snapshot build. 
This changes the value of ${version} to

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
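One of the release notes above says the SparseRowStore now represents Unicode primary keys as UTF-8 rather than as collator sort keys so that the primary key can be decoded back from the index key. The contrast can be seen with the standard java.text.Collator API; the snippet below is illustrative only and is not bigdata code.

import java.text.CollationKey;
import java.text.Collator;

public class KeyDecodabilitySketch {

    public static void main(final String[] args) throws Exception {

        final String primaryKey = "Straße"; // any Unicode primary key

        // UTF-8 round-trips, so the original string can be recovered from
        // the bytes stored in the index key.
        final byte[] utf8 = primaryKey.getBytes("UTF-8");
        final String decoded = new String(utf8, "UTF-8");
        System.out.println(primaryKey.equals(decoded)); // true

        // A collator sort key preserves ordering for range scans, but it is
        // one-way: there is no API to turn the sort key bytes back into the
        // original string.
        final CollationKey ck = Collator.getInstance().getCollationKey(primaryKey);
        final byte[] sortKey = ck.toByteArray();
        System.out.println(sortKey.length + " sort key bytes (not decodable)");
    }
}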
From: <tho...@us...> - 2010-07-28 15:57:18
|
Revision: 3334 http://bigdata.svn.sourceforge.net/bigdata/?rev=3334&view=rev Author: thompsonbry Date: 2010-07-28 15:57:11 +0000 (Wed, 28 Jul 2010) Log Message: ----------- Modified the NanoSparqlServer to support URLs containing a non-default namespace such as http://localhost:80/namespace/LUBM_U50. This was done so we can start the NanoSparqlServer regardless of whether a KB exists at the default namespace. I might make a few more changes along these lines to further clean things up. This change moves the resolution of the AbstractTripleStore, the BigdataSail, etc. inside of the query execution. I have not yet determined whether this imposes any significant overhead when benchmarking. NanoSparqlServer now obtains a read-only transaction when it starts as of the lastCommitTime on the bigdata instance. This is not really ideal because there is little point to starting it first if we are going to be reading from an empty commit point so I may revisit this point later (perhaps always reading from the lastCommitTime unless the URL includes "/namespace/timestamp", in which case we read from that timestamp using a full transaction). Also, it is possible that abrupt termination of the JVM process (kill -9) could fail to release the read-only transaction and thus keep history from being released. Modified Paths: -------------- trunk/bigdata-perf/lubm/build.properties trunk/bigdata-perf/lubm/src/resources/config/config.kb.sparql trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepository.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java Property Changed: ---------------- trunk/bigdata-perf/lubm/ Property changes on: trunk/bigdata-perf/lubm ___________________________________________________________________ Modified: svn:ignore - ant-build *result.txt + ant-build *result.txt sample.query.sparql Modified: trunk/bigdata-perf/lubm/build.properties =================================================================== --- trunk/bigdata-perf/lubm/build.properties 2010-07-28 13:07:06 UTC (rev 3333) +++ trunk/bigdata-perf/lubm/build.properties 2010-07-28 15:57:11 UTC (rev 3334) @@ -46,7 +46,7 @@ lubm.namespace=LUBM_U${lubm.univ} # Laptop benchmark data directory. -#lubm.baseDir=d:/bigdata-perf-analysis/lubm/lubm_${lubm.univ} +#lubm.baseDir=d:/bigdata-perf-analysis/lubm/U${lubm.univ} # Server benchmark directory. #lubm.baseDir=/nas/data/lubm/U${lubm.univ} # Windows Server 2008 benchmark data directory. @@ -78,7 +78,7 @@ lubm.journalPropertyFile=${journalMode}Store.properties # The name of the file used for the journal. -#lubm.journalFile=${lubm.baseDir}/U${lubm.univ}/bigdata-lubm.${journalMode}.jnl +#lubm.journalFile=${lubm.baseDir}/bigdata-lubm.${journalMode}.jnl # Note: This is on the large volume. #lubm.journalFile=/data/lubm/U${lubm.univ}/bigdata-lubm.${journalMode}.jnl # SSD. 
Modified: trunk/bigdata-perf/lubm/src/resources/config/config.kb.sparql =================================================================== --- trunk/bigdata-perf/lubm/src/resources/config/config.kb.sparql 2010-07-28 13:07:06 UTC (rev 3333) +++ trunk/bigdata-perf/lubm/src/resources/config/config.kb.sparql 2010-07-28 15:57:11 UTC (rev 3334) @@ -8,4 +8,7 @@ class=edu.lehigh.swat.bench.ubt.bigdata.SparqlRepositoryFactory ontology=ignored data=ignored +# Use the default namespace specified to NanoSparqlServer database=http://localhost:80 +# Use a specific namespace regardless of the default specified to NanoSparqlServer +#database=http://localhost:80/namespace/LUBM_U50 Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepository.java =================================================================== --- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepository.java 2010-07-28 13:07:06 UTC (rev 3333) +++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepository.java 2010-07-28 15:57:11 UTC (rev 3334) @@ -21,12 +21,17 @@ return ((BigdataSail) getSail()).getDatabase(); } + + @Override + public BigdataSail getSail() { + return (BigdataSail)super.getSail(); + } - private BigdataSail getBigdataSail() { - - return (BigdataSail) getSail(); - - } +// private BigdataSail getBigdataSail() { +// +// return (BigdataSail) getSail(); +// +// } @Override public SailRepositoryConnection getConnection() throws RepositoryException { @@ -55,7 +60,7 @@ throws RepositoryException { return new BigdataSailRepositoryConnection(this, - getBigdataSail().getReadOnlyConnection()); + getSail().getReadOnlyConnection()); } /** @@ -69,7 +74,7 @@ throws RepositoryException { return new BigdataSailRepositoryConnection(this, - getBigdataSail().getReadOnlyConnection(timestamp)); + getSail().getReadOnlyConnection(timestamp)); } @@ -79,7 +84,7 @@ try { return new BigdataSailRepositoryConnection(this, - getBigdataSail().getReadWriteConnection()); + getSail().getReadWriteConnection()); } catch (IOException e) { @@ -95,7 +100,7 @@ try { return new BigdataSailRepositoryConnection(this, - getBigdataSail().getUnisolatedConnection()); + getSail().getUnisolatedConnection()); } catch (InterruptedException e) { Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java =================================================================== --- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java 2010-07-28 13:07:06 UTC (rev 3333) +++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java 2010-07-28 15:57:11 UTC (rev 3334) @@ -42,8 +42,15 @@ } + @Override + public BigdataSailRepository getRepository() { + return (BigdataSailRepository)super.getRepository(); + } + /** - * Overriden to capture query hints from SPARQL queries. Query hints are + * {@inheritDoc} + * <p> + * Overridden to capture query hints from SPARQL queries. Query hints are * embedded in query strings as namespaces. * See {@link BD#QUERY_HINTS_NAMESPACE} for more information. */ @@ -61,7 +68,9 @@ } /** - * Overriden to capture query hints from SPARQL queries. Query hints are + * {@inheritDoc} + * <p> + * Overridden to capture query hints from SPARQL queries. Query hints are * embedded in query strings as namespaces. * See {@link BD#QUERY_HINTS_NAMESPACE} for more information. */ @@ -76,9 +85,11 @@ } /** - * Overriden to capture query hints from SPARQL queries. Query hints are - * embedded in query strings as namespaces. 
- * See {@link BD#QUERY_HINTS_NAMESPACE} for more information. + * {@inheritDoc} + * <p> + * Overridden to capture query hints from SPARQL queries. Query hints are + * embedded in query strings as namespaces. See + * {@link BD#QUERY_HINTS_NAMESPACE} for more information. */ @Override public SailBooleanQuery prepareBooleanQuery(final QueryLanguage ql, @@ -91,7 +102,9 @@ } /** - * Overriden to capture query hints from SPARQL queries. Query hints are + * {@inheritDoc} + * <p> + * Overridden to capture query hints from SPARQL queries. Query hints are * embedded in query strings as namespaces. * See {@link BD#QUERY_HINTS_NAMESPACE} for more information. */ @@ -121,6 +134,8 @@ } /** + * {@inheritDoc} + * <p> * Note: auto-commit is an EXTREMELY bad idea. Performance will be terrible. * The database will swell to an outrageous size. TURN OFF AUTO COMMIT. * Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java =================================================================== --- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java 2010-07-28 13:07:06 UTC (rev 3333) +++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java 2010-07-28 15:57:11 UTC (rev 3334) @@ -275,7 +275,8 @@ final int rc = conn.getResponseCode(); if(rc < 200 || rc >= 300) { - throw new IOException(conn.getResponseMessage()); + throw new IOException(rc + " : " + + conn.getResponseMessage()+" : "+url); } if (log.isDebugEnabled()) { Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java =================================================================== --- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 2010-07-28 13:07:06 UTC (rev 3333) +++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 2010-07-28 15:57:11 UTC (rev 3334) @@ -73,15 +73,18 @@ import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.IJournal; +import com.bigdata.journal.ITransactionService; import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; import com.bigdata.journal.TimestampUtility; import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.sail.BigdataSailGraphQuery; import com.bigdata.rdf.sail.BigdataSailRepository; +import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; import com.bigdata.rdf.sail.bench.NanoSparqlClient.QueryType; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.relation.AbstractResource; +import com.bigdata.service.IBigdataFederation; import com.bigdata.service.jini.JiniClient; import com.bigdata.util.concurrent.DaemonThreadFactory; import com.bigdata.util.httpd.AbstractHTTPD; @@ -95,9 +98,7 @@ * @todo Allow configuration options for the sparql endpoint either as URI * parameters, in the property file, as request headers, or as query hints * using the PREFIX mechanism. - * - * @todo Make each namespace available at <code>/namespace?query=...</code>? - * + * @todo Allow timestamp for server reads as protocol parameter (URL query * parameter or header). * @@ -107,8 +108,6 @@ * as well). * * @todo Add command to kill a running query. - * - * @todo Add command to drop the LRUNexus cache. */ public class NanoSparqlServer extends AbstractHTTPD { @@ -133,16 +132,26 @@ */ static private final String charset = "UTF-8"; +// /** +// * The target Sail. +// */ +// private final BigdataSail sail; +// +// /** +// * The target repository. 
+// */ +// private final BigdataSailRepository repo; + /** - * The target Sail. + * The configuration object. */ - private final BigdataSail sail; - + private final Config config; + /** - * The target repository. - */ - private final BigdataSailRepository repo; - + * Provides access to the bigdata database. + */ + private final IIndexManager indexManager; + /** * @todo use to decide ASK, DESCRIBE, CONSTRUCT, SELECT, EXPLAIN, etc. */ @@ -163,7 +172,7 @@ final String query; /** The timestamp when the query was accepted (ns). */ final long begin; - public RunningQuery(long queryId,String query,long begin) { + public RunningQuery(long queryId, String query, long begin) { this.queryId = queryId; this.query = query; this.begin = begin; @@ -188,22 +197,16 @@ super(config.port); - // resolve the kb instance of interest. - final AbstractTripleStore tripleStore = (AbstractTripleStore) indexManager - .getResourceLocator().locate(config.namespace, ITx.UNISOLATED); - - if (tripleStore == null) { - - throw new RuntimeException("No such kb: " + config.namespace); - - } + if (config.namespace == null) + throw new IllegalArgumentException(); - // since the kb exists, wrap it as a sail. - sail = new BigdataSail(tripleStore); - - repo = new BigdataSailRepository(sail); - repo.initialize(); - + if(indexManager == null) + throw new IllegalArgumentException(); + + this.config = config; + + this.indexManager = indexManager; + // used to parse qeries. engine = new SPARQLParserFactory().getParser(); @@ -230,13 +233,18 @@ * loaded since they must query each shard for the primary statement * index and the TERM2ID index. */ - protected StringBuilder getKBInfo() { + protected StringBuilder getKBInfo(final String namespace, + final long timestamp) { final StringBuilder sb = new StringBuilder(); + BigdataSailRepositoryConnection conn = null; + try { - final AbstractTripleStore tripleStore = sail.getDatabase(); + conn = getQueryConnection(namespace, timestamp); + + final AbstractTripleStore tripleStore = conn.getTripleStore(); sb.append("class\t = " + tripleStore.getClass().getName() + "\n"); @@ -282,35 +290,29 @@ .getIndexMetadata() .getWriteRetentionQueueCapacity() + "\n"); - sb.append(BigdataSail.Options.STAR_JOINS + "=" + sail.isStarJoins() - + "\n"); + sb.append(BigdataSail.Options.STAR_JOINS + "=" + + conn.getRepository().getSail().isStarJoins() + "\n"); sb.append(AbstractResource.Options.MAX_PARALLEL_SUBQUERIES + "=" + tripleStore.getMaxParallelSubqueries() + "\n"); - /* - * Stuff which is specific to a local/embedded database. - */ - if (tripleStore.getIndexManager() instanceof IJournal) { - - final AbstractJournal jnl = (AbstractJournal) sail.getDatabase() - .getIndexManager(); - - sb.append("file\t= " + jnl.getFile()+"\n"); - - sb.append("BufferMode\t= " + jnl.getBufferStrategy().getBufferMode()+"\n"); - - sb.append("nextOffset\t= " - + jnl.getRootBlockView().getNextOffset() + "\n"); - - } - // sb.append(tripleStore.predicateUsage()); } catch (Throwable t) { log.warn(t.getMessage(), t); + } finally { + + if(conn != null) { + try { + conn.close(); + } catch (RepositoryException e) { + log.error(e, e); + } + + } + } return sb; @@ -333,6 +335,9 @@ } } + /** + * FIXME Must abort any open transactions. + */ synchronized public void shutdownNow() { System.err.println("Immediate shutdown"); // interrupt all running queries. @@ -460,14 +465,76 @@ if("/status".equals(uri)) { + // @todo Could list the known namespaces. 
return doStatus(uri, method, header, params); - - } - + + } + + if (uri.startsWith("/namespace/")) { + + // @todo allow status query against any namespace. + return doQuery(uri, method, header, params); + + } + return new Response(HTTP_NOTFOUND, MIME_TEXT_PLAIN, uri); } + /** + * Return the namespace which will be used to execute the query. The + * namespace is represented by the first component of the URI. If there is + * no namespace, then return the configured default namespace. + * + * @param uri + * The URI path string. + * + * @return The namespace. + */ + private String getNamespace(final String uri) { + +// // locate the "//" after the protocol. +// final int index = uri.indexOf("//"); + + if(!uri.startsWith("/namespace/")) { + // use the default namespace. + return config.namespace; + } + + // locate the next "/" in the URI path. + final int beginIndex = uri.indexOf('/', 1/* fromIndex */); + + // locate the next "/" in the URI path. + int endIndex = uri.indexOf('/', beginIndex + 1/* fromIndex */); + + if (endIndex == -1) { + // use the rest of the URI. + endIndex = uri.length(); + } + + // return the namespace. + return uri.substring(beginIndex + 1, endIndex); + + } + + /** + * Return the timestamp which will be used to execute the query. + * + * @todo the configured timestamp should only be used for the default + * namespace (or it should be configured for each graph explicitly, or + * we should bundle the (namespace,timestamp) together as a single + * object). + * + * @todo use path for the timestamp or acquire read lock when the server + * starts against a specific namespace? + */ + private long getTimestamp(final String uri, + final LinkedHashMap<String, Vector<String>> params) { + + return config.timestamp; + + } + /** * Respond to a status request. * @@ -495,19 +562,26 @@ if (showKBInfo) { // General information on the connected kb. - sb.append(getKBInfo()); + sb.append(getKBInfo(getNamespace(uri), getTimestamp(uri, params))); } - if (repo.getDatabase().getIndexManager() instanceof IJournal) { + if (indexManager instanceof IJournal) { /* * Stuff which is specific to a local/embedded database. */ - final AbstractJournal jnl = (AbstractJournal) repo.getDatabase() - .getIndexManager(); + final AbstractJournal jnl = (AbstractJournal) indexManager; + sb.append("file\t= " + jnl.getFile() + "\n"); + + sb.append("BufferMode\t= " + + jnl.getBufferStrategy().getBufferMode() + "\n"); + + sb.append("nextOffset\t= " + jnl.getRootBlockView().getNextOffset() + + "\n"); + if (LRUNexus.INSTANCE != null) { sb.append(LRUNexus.Options.CLASS + "=" @@ -522,7 +596,6 @@ } // show the disk access details. - sb.append(jnl.getBufferStrategy().getCounters().toString()+"\n\n"); } @@ -592,6 +665,10 @@ final Properties header, final LinkedHashMap<String, Vector<String>> params) throws Exception { + final String namespace = getNamespace(uri); + + final long timestamp = getTimestamp(uri, params); + final String queryStr = params.get("query").get(0); if (queryStr == null) { @@ -615,8 +692,8 @@ */ final PipedOutputStream os = new PipedOutputStream(); final InputStream is = new PipedInputStream(os);//Bytes.kilobyte32*8/*pipeSize*/); - final FutureTask<Void> ft = new FutureTask<Void>(getQueryTask(queryStr, - os)); + final FutureTask<Void> ft = new FutureTask<Void>(getQueryTask( + namespace, timestamp, queryStr, os)); try { // Choose an appropriate MIME type. 
@@ -711,8 +788,9 @@ * * @throws MalformedQueryException */ - private Callable<Void> getQueryTask(final String queryStr, - final PipedOutputStream os) throws MalformedQueryException { + private Callable<Void> getQueryTask(final String namespace, + final long timestamp, final String queryStr, + final PipedOutputStream os) throws MalformedQueryException { /* * Parse the query so we can figure out how it will need to be executed. @@ -736,142 +814,283 @@ break; case DESCRIBE: case CONSTRUCT: - return new GraphQueryTask(queryStr, os); + return new GraphQueryTask(namespace, timestamp, queryStr, os); case SELECT: - return new TupleQueryTask(queryStr, os); + return new TupleQueryTask(namespace, timestamp, queryStr, os); } throw new RuntimeException("Unknown query type: " + queryType); } - /** - * Note: A read-only connection from the lastCommitTime - * - * @throws RepositoryException - */ - protected SailRepositoryConnection getQueryConnection() - throws RepositoryException { + /** + * Note: A read-only connection. + * + * @param namespace + * @param timestamp + * + * @throws RepositoryException + * + * @todo enforce historical query by making sure timestamps conform (we do + * not want to allow read/write tx queries unless update semantics are + * introduced ala SPARQL 1.1). + * + * @todo Use a distributed read-only tx for queries (it would be nice if a + * tx used 2PL to specify which namespaces it could touch). + */ + protected BigdataSailRepositoryConnection getQueryConnection( + final String namespace, final long timestamp) + throws RepositoryException { + + // resolve the default namespace. + final AbstractTripleStore tripleStore = (AbstractTripleStore) indexManager + .getResourceLocator().locate(namespace, timestamp); - return repo.getReadOnlyConnection(); + if (tripleStore == null) { + throw new RuntimeException("Not found: namespace=" + namespace + + ", timestamp=" + TimestampUtility.toString(timestamp)); + + } + + /* + * Since the kb exists, wrap it as a sail. + * + * @todo cache? close when not in use any more? + */ + final BigdataSail sail = new BigdataSail(tripleStore); + + final BigdataSailRepository repo = new BigdataSailRepository(sail); + + repo.initialize(); + + return (BigdataSailRepositoryConnection) repo + .getReadOnlyConnection(timestamp); + } /** - * Executes a tuple query. + * Abstract base class for running queries handles the timing, pipe, + * reporting, obtains the connection, and provides the finally {} semantics + * for each type of query task. * - * @todo Extract a base class which handles the timing, pipe, reporting, - * obtains the connection, and provides the finally {} semantics for - * each type of query task. + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * @version $Id$ */ - private class TupleQueryTask implements Callable<Void> { + abstract private class AbstractQueryTask implements Callable<Void> { + + /** The namespace against which the query will be run. */ + private final String namespace; - private final String queryStr; - private final PipedOutputStream os; + /** + * The timestamp of the view for that namespace against which the query + * will be run. + */ + private final long timestamp; - public TupleQueryTask(final String queryStr, final PipedOutputStream os) { + /** The SPARQL query string. */ + protected final String queryStr; - this.queryStr = queryStr; - this.os = os; + /** A pipe used to incrementally deliver the results to the client. 
*/ + private final PipedOutputStream os; - } + /** + * + * @param namespace + * The namespace against which the query will be run. + * @param timestamp + * The timestamp of the view for that namespace against which + * the query will be run. + * @param queryStr + * The SPARQL query string. + * @param os + * A pipe used to incrementally deliver the results to the + * client. + */ + protected AbstractQueryTask(final String namespace, + final long timestamp, final String queryStr, + final PipedOutputStream os) { - public Void call() throws Exception { - final Long queryId = Long.valueOf(queryIdFactory.incrementAndGet()); - final SailRepositoryConnection cxn = getQueryConnection(); - try { - final long begin = System.nanoTime(); - queries.put(queryId, new RunningQuery(queryId.longValue(), queryStr, begin)); - final TupleQuery query = cxn.prepareTupleQuery( - QueryLanguage.SPARQL, queryStr); - query.evaluate(new SPARQLResultsXMLWriter(new XMLWriter(os))); - os.close(); - return null; - } catch (Throwable t) { - // launder and rethrow the exception. - throw launderThrowable(t,os); - } finally { - try { - cxn.close(); - } finally { - queries.remove(queryId); - } - } + this.namespace = namespace; + this.timestamp = timestamp; + this.queryStr = queryStr; + this.os = os; + + } + + /** + * Execute the query. + * + * @param cxn + * The connection. + * @param os + * Where the write the query results. + * + * @throws Exception + */ + abstract protected void doQuery(SailRepositoryConnection cxn, + OutputStream os) throws Exception; + + final public Void call() throws Exception { + final Long queryId = Long.valueOf(queryIdFactory.incrementAndGet()); + final SailRepositoryConnection cxn = getQueryConnection(namespace, + timestamp); + final long begin = System.nanoTime(); + try { + queries.put(queryId, new RunningQuery(queryId.longValue(), + queryStr, begin)); + doQuery(cxn, os); + os.flush(); + return null; + } catch (Throwable t) { + // launder and rethrow the exception. + throw launderThrowable(t, os); + } finally { + queries.remove(queryId); + try { + os.close(); + } catch (Throwable t) { + log.error(t, t); + } + try { + cxn.close(); + } catch (Throwable t) { + log.error(t, t); + } + } + } + + } + + /** + * Executes a tuple query. + */ + private class TupleQueryTask extends AbstractQueryTask { + + public TupleQueryTask(final String namespace, final long timestamp, + final String queryStr, final PipedOutputStream os) { + + super(namespace, timestamp, queryStr, os); + } + protected void doQuery(final SailRepositoryConnection cxn, + final OutputStream os) throws Exception { + + final TupleQuery query = cxn.prepareTupleQuery( + QueryLanguage.SPARQL, queryStr); + + query.evaluate(new SPARQLResultsXMLWriter(new XMLWriter(os))); + + } + +// public Void call() throws Exception { +// final Long queryId = Long.valueOf(queryIdFactory.incrementAndGet()); +// final SailRepositoryConnection cxn = getQueryConnection(); +// try { +// final long begin = System.nanoTime(); +// queries.put(queryId, new RunningQuery(queryId.longValue(), queryStr, begin)); +// final TupleQuery query = cxn.prepareTupleQuery( +// QueryLanguage.SPARQL, queryStr); +// query.evaluate(new SPARQLResultsXMLWriter(new XMLWriter(os))); +// os.close(); +// return null; +// } catch (Throwable t) { +// // launder and rethrow the exception. +// throw launderThrowable(t,os); +// } finally { +// try { +// cxn.close(); +// } finally { +// queries.remove(queryId); +// } +// } +// } + } /** * Executes a graph query. 
*/ - private class GraphQueryTask implements Callable<Void> { + private class GraphQueryTask extends AbstractQueryTask { - private final String queryStr; - private final PipedOutputStream os; + public GraphQueryTask(final String namespace, final long timestamp, + final String queryStr, final PipedOutputStream os) { - public GraphQueryTask(final String queryStr, final PipedOutputStream os) { + super(namespace,timestamp,queryStr,os); - this.queryStr = queryStr; - this.os = os; - } - public Void call() throws Exception { - final Long queryId = Long.valueOf(queryIdFactory.incrementAndGet()); - final SailRepositoryConnection cxn = getQueryConnection(); - try { - final long begin = System.nanoTime(); - queries.put(queryId, new RunningQuery(queryId.longValue(), queryStr, begin)); - final BigdataSailGraphQuery query = (BigdataSailGraphQuery) cxn - .prepareGraphQuery(QueryLanguage.SPARQL, queryStr); - query.evaluate(new RDFXMLWriter(os)); - os.close(); - return null; - } catch (Throwable t) { - throw launderThrowable(t, os); - } finally { - try { - cxn.close(); - } finally { - queries.remove(queryId); - } - } - } - +// public Void call() throws Exception { +// final Long queryId = Long.valueOf(queryIdFactory.incrementAndGet()); +// final SailRepositoryConnection cxn = getQueryConnection(); +// try { +// final long begin = System.nanoTime(); +// queries.put(queryId, new RunningQuery(queryId.longValue(), queryStr, begin)); +// final BigdataSailGraphQuery query = (BigdataSailGraphQuery) cxn +// .prepareGraphQuery(QueryLanguage.SPARQL, queryStr); +// query.evaluate(new RDFXMLWriter(os)); +// os.close(); +// return null; +// } catch (Throwable t) { +// throw launderThrowable(t, os); +// } finally { +// try { +// cxn.close(); +// } finally { +// queries.remove(queryId); +// } +// } +// } + + @Override + protected void doQuery(final SailRepositoryConnection cxn, + final OutputStream os) throws Exception { + + final BigdataSailGraphQuery query = (BigdataSailGraphQuery) cxn + .prepareGraphQuery(QueryLanguage.SPARQL, queryStr); + + query.evaluate(new RDFXMLWriter(os)); + + } + } - /** - * Send a STOP message to the service - * - * @param port The port for that service. - * - * @throws IOException - * - * @todo This winds up warning <pre> java.net.SocketTimeoutException: Read timed out</pre> - * even though the shutdown request was accepted and processed by the server. I'm not - * sure why. - */ - public static void sendStop(final int port) throws IOException { - - final URL url = new URL("http://localhost:" + port+"/stop"); - HttpURLConnection conn = null; - try { + /** + * Send a STOP message to the service + * + * @param port + * The port for that service. + * + * @throws IOException + * + * @todo This winds up warning <code> + * java.net.SocketTimeoutException: Read timed out + * </code> even though the shutdown request was + * accepted and processed by the server. I'm not sure why. + */ + public static void sendStop(final int port) throws IOException { - conn = (HttpURLConnection) url.openConnection(); - conn.setRequestMethod("POST"); - conn.setDoInput(true); // true to read from the server. - conn.setDoOutput(true); // true to write to the server. 
- conn.setUseCaches(false); - conn.setReadTimeout(2000/* ms */); - conn.setRequestProperty("Content-Type", - "application/x-www-form-urlencoded"); - conn.setRequestProperty("Content-Length", "" + Integer.toString(0)); - conn.setRequestProperty("Content-Language", "en-US"); + final URL url = new URL("http://localhost:" + port + "/stop"); + HttpURLConnection conn = null; + try { - // Send request - conn.getOutputStream().close(); + conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("POST"); + conn.setDoInput(true); // true to read from the server. + conn.setDoOutput(true); // true to write to the server. + conn.setUseCaches(false); + conn.setReadTimeout(2000/* ms */); + conn.setRequestProperty("Content-Type", + "application/x-www-form-urlencoded"); + conn.setRequestProperty("Content-Length", "" + Integer.toString(0)); + conn.setRequestProperty("Content-Language", "en-US"); - // connect. + // Send request + conn.getOutputStream().close(); + + // connect. try { conn.connect(); @@ -913,16 +1132,21 @@ public int port; /** - * The namespace of the KB instance. + * The default namespace. */ public String namespace; + /** + * The default timestamp used to query the default namespace. + */ + public long timestamp; + /** * The #of threads to use to handle SPARQL queries -or- ZERO (0) for an * unbounded pool. */ public int queryThreadPoolSize = 8; - + public Config() { } @@ -934,47 +1158,51 @@ } - /** - * Run an httpd service exposing a SPARQL endpoint. The service will respond - * at the root path for the specified port. - * - * @param args - * USAGE: <code>port -stop</code> to stop the server; OR<br/> - * <code>(options) <i>namespace</i> (propertyFile|configFile) )</code> - * where - * <dl> - * <dt>port</dt> - * <dd>The port on which the service will respond.</dd> - * <dt>namespace</dt> - * <dd>The namespace of the target KB instance ("kb" is the - * default namespace).</dd> - * <dt>propertyFile</dt> - * <dd>A java properties file for a standalone {@link Journal}.</dd> - * <dt>configFile</dt> - * <dd>A jini configuration file for a bigdata federation.</dd> - * </dl> - * and <i>options</i> are any of: - * <dl> - * <dt>-nthreads</dt> - * <dd>The #of threads which will be used to answer SPARQL queries.</dd> - * <dt></dt> - * <dd></dd> - * <dt></dt> - * <dd></dd> - * <dt></dt> - * <dd></dd> - * <dt></dt> - * <dd></dd> - * <dt></dt> - * <dd></dd> - * </dl> - */ + /** + * Run an httpd service exposing a SPARQL endpoint. 
The service will respond + * to the following URL paths: + * <dl> + * <dt>http://localhost:port/</dt> + * <dd>The SPARQL end point for the default namespace as specified by the + * <code>namespace</code> command line argument.</dd> + * <dt>http://localhost:port/namespace/NAMESPACE</dt> + * <dd>where <code>NAMESPACE</code> is the namespace of some triple store or + * quad store, may be used to address ANY triple or quads store in the + * bigdata instance.</dd> + * <dt>http://localhost:port/status</dt> + * <dd>A status page.</dd> + * </dl> + * + * @param args + * USAGE: <code>port -stop</code> to stop the server; OR<br/> + * <code>(options) <i>namespace</i> (propertyFile|configFile) )</code> + * where + * <dl> + * <dt>port</dt> + * <dd>The port on which the service will respond.</dd> + * <dt>namespace</dt> + * <dd>The namespace of the default SPARQL endpoint (the + * namespace will be <code>kb</code> if none was specified when + * the triple/quad store was created).</dd> + * <dt>propertyFile</dt> + * <dd>A java properties file for a standalone {@link Journal}.</dd> + * <dt>configFile</dt> + * <dd>A jini configuration file for a bigdata federation.</dd> + * </dl> + * and <i>options</i> are any of: + * <dl> + * <dt>-nthreads</dt> + * <dd>The #of threads which will be used to answer SPARQL + * queries.</dd> + * </dl> + */ public static void main(final String[] args) { final Config config = new Config(); config.port = 80; Journal jnl = null; JiniClient<?> jiniClient = null; NanoSparqlServer server = null; + ITransactionService txs = null; try { /* * ` @@ -1073,6 +1301,14 @@ } + txs = (indexManager instanceof Journal ? ((Journal) indexManager) + .getTransactionManager().getTransactionService() + : ((IBigdataFederation<?>) indexManager) + .getTransactionService()); + + config.timestamp = txs.newTx(ITx.READ_COMMITTED); + System.out.println("tx: " + config.timestamp); + // start the server. server = new NanoSparqlServer(config, indexManager); @@ -1104,9 +1340,10 @@ if (true) { // @todo if(!quiet) or if(verbose) /* - * Log some information about the kb (#of statements, etc). + * Log some information about the default kb (#of statements, etc). */ - System.out.println(server.getKBInfo()); + System.out.println(server.getKBInfo(config.namespace, + config.timestamp)); } /* @@ -1126,7 +1363,15 @@ } catch (Throwable ex) { ex.printStackTrace(); - } finally { + } finally { + if (txs != null) { + try { + txs.abort(config.timestamp); + } catch (IOException e) { + log.error("Could not release transaction: tx=" + + config.timestamp, e); + } + } if (server != null) server.shutdownNow(); if (jnl != null) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
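The log message above describes addressing a specific KB instance through a path such as http://localhost:80/namespace/LUBM_U50, with the SPARQL query passed in the "query" parameter that doQuery() reads. A hedged client-side sketch follows; the host, port, namespace, and use of GET here are assumptions for illustration rather than a documented client API.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;

public class NamespaceQuerySketch {

    public static void main(final String[] args) throws Exception {

        final String sparql = "SELECT * WHERE { ?s ?p ?o } LIMIT 10";

        // Address a specific namespace rather than the server's default KB.
        final URL url = new URL("http://localhost:80/namespace/LUBM_U50?query="
                + URLEncoder.encode(sparql, "UTF-8"));

        final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");

        final int rc = conn.getResponseCode();
        if (rc < 200 || rc >= 300)
            throw new RuntimeException(rc + " : " + conn.getResponseMessage()
                    + " : " + url);

        // SELECT results come back as SPARQL results XML in the response body.
        final BufferedReader reader = new BufferedReader(new InputStreamReader(
                conn.getInputStream(), "UTF-8"));
        try {
            String line;
            while ((line = reader.readLine()) != null)
                System.out.println(line);
        } finally {
            reader.close();
        }
    }
}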
From: <fko...@us...> - 2010-07-28 18:31:58
|
Revision: 3336 http://bigdata.svn.sourceforge.net/bigdata/?rev=3336&view=rev Author: fkoliver Date: 2010-07-28 18:31:51 +0000 (Wed, 28 Jul 2010) Log Message: ----------- [trunk] Ticket #119: Replaced the signal handlers in the load balancer and the services manager with remote methods. Extended the BroadcastSighup class to call the new sighup methods on discovered services. Add broadcast_sighup command as a wrapper around the class: arguments specify which type of service, and whether all or only local services are affected. Update other scripts to replace kill commands with the new script. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/service/AbstractService.java trunk/bigdata/src/java/com/bigdata/service/ILoadBalancerService.java trunk/bigdata/src/java/com/bigdata/service/LoadBalancerService.java trunk/bigdata-jini/src/java/com/bigdata/jini/start/AbstractServicesManagerService.java trunk/bigdata-jini/src/java/com/bigdata/jini/start/ServicesManagerServer.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/AbstractServer.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/LoadBalancerServer.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/BroadcastSighup.java trunk/src/resources/scripts/archiveRun.sh trunk/src/resources/scripts/bigdata trunk/src/resources/scripts/extractCounters.sh Added Paths: ----------- trunk/src/resources/scripts/broadcast_sighup Modified: trunk/bigdata/src/java/com/bigdata/service/AbstractService.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/AbstractService.java 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/bigdata/src/java/com/bigdata/service/AbstractService.java 2010-07-28 18:31:51 UTC (rev 3336) @@ -46,7 +46,7 @@ */ abstract public class AbstractService implements IService { - protected static final Logger log = Logger.getLogger(AbstractService.class); + private static final Logger log = Logger.getLogger(AbstractService.class); private String serviceName; private UUID serviceUUID; Modified: trunk/bigdata/src/java/com/bigdata/service/ILoadBalancerService.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/ILoadBalancerService.java 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/bigdata/src/java/com/bigdata/service/ILoadBalancerService.java 2010-07-28 18:31:51 UTC (rev 3336) @@ -173,6 +173,11 @@ */ public boolean isUnderUtilizedDataService(UUID serviceUUID) throws IOException; + /** + * Logs counters to a temp file. Replacement for sighup mechanism. + */ + public void sighup() throws IOException; + // /** // * Return the identifier(s) of under-utilized service(s). // * Modified: trunk/bigdata/src/java/com/bigdata/service/LoadBalancerService.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/LoadBalancerService.java 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/bigdata/src/java/com/bigdata/service/LoadBalancerService.java 2010-07-28 18:31:51 UTC (rev 3336) @@ -676,6 +676,7 @@ } + @Override synchronized public void shutdown() { if(!isOpen()) return; @@ -692,7 +693,7 @@ * Obtain the exclusive write lock for the event BTree before flushing * writes. */ - final Lock lock = eventReceiver.getWriteLock(); + final Lock tmpLock = eventReceiver.getWriteLock(); try { // Flush any buffered writes to the event store. 
@@ -707,7 +708,7 @@ } finally { - lock.unlock(); + tmpLock.unlock(); } @@ -718,6 +719,7 @@ } + @Override synchronized public void shutdownNow() { if(!isOpen()) return; @@ -740,6 +742,7 @@ } + @Override synchronized public void destroy() { super.destroy(); @@ -2017,8 +2020,38 @@ } } - + /** + * Logs the counters on a file created using + * {@link File#createTempFile(String, String, File)} in the log + * directory. + * + * @throws IOException + * + * @todo this method is not exposed to RMI (it is not on any + * {@link Remote} interface) but it could be. + */ + public void logCounters() throws IOException { + + if (isTransient) { + + log.warn("LBS is transient - request ignored."); + + return; + + } + + final File file = File.createTempFile("counters-hup", ".xml", logDir); + + logCounters(file); + + } + + public void sighup() throws IOException { + logCounters(); + } + + /** * Notify the {@link LoadBalancerService} that a new service is available. * <p> * Note: Embedded services must invoke this method <em>directly</em> when Modified: trunk/bigdata-jini/src/java/com/bigdata/jini/start/AbstractServicesManagerService.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/jini/start/AbstractServicesManagerService.java 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/bigdata-jini/src/java/com/bigdata/jini/start/AbstractServicesManagerService.java 2010-07-28 18:31:51 UTC (rev 3336) @@ -13,6 +13,7 @@ import com.bigdata.service.IServiceShutdown; import com.bigdata.service.jini.JiniFederation; import com.bigdata.service.jini.RemoteDestroyAdmin; +import org.apache.log4j.Logger; /** * Core impl. @@ -23,6 +24,7 @@ public abstract class AbstractServicesManagerService extends AbstractService implements IServicesManagerService, IServiceListener, IServiceShutdown { + private static final Logger log = Logger.getLogger(AbstractServicesManagerService.class); private final Properties properties; /** Modified: trunk/bigdata-jini/src/java/com/bigdata/jini/start/ServicesManagerServer.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/jini/start/ServicesManagerServer.java 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/bigdata-jini/src/java/com/bigdata/jini/start/ServicesManagerServer.java 2010-07-28 18:31:51 UTC (rev 3336) @@ -45,9 +45,6 @@ import org.apache.log4j.Logger; import org.apache.log4j.MDC; -import sun.misc.Signal; -import sun.misc.SignalHandler; - import com.bigdata.btree.IndexSegment; import com.bigdata.jini.start.config.IServiceConstraint; import com.bigdata.jini.start.config.JiniCoreServicesConfiguration; @@ -357,101 +354,9 @@ super(args, lifeCycle); this.args = args; - - try { - - /* - * Note: This signal is not supported under Windows. You can use the - * sighup() method to accomplish the same ends via RMI. - */ - new SigHUPHandler("HUP"); - - } catch (IllegalArgumentException ex) { - - log.warn("Signal handler not installed: " + ex); - - } - } /** - * SIGHUP Handler. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ - private class SigHUPHandler implements SignalHandler { - - private final SignalHandler oldHandler; - - /** - * Install handler. - * - * @param signalName - * The signal name. - * @param args - * The command line arguments (these identify the - * configuration and any overrides). 
- * - * @see http://www-128.ibm.com/developerworks/java/library/i-signalhandling/ - * - * @see http://forum.java.sun.com/thread.jspa?threadID=514860&messageID=2451429 - * for the use of {@link Runtime#addShutdownHook(Thread)}. - * - * @see http://twit88.com/blog/2008/02/06/java-signal-handling/ - */ - @SuppressWarnings("all") // Signal is in the sun namespace - protected SigHUPHandler(final String signalName) { - - final Signal signal = new Signal(signalName); - - this.oldHandler = Signal.handle(signal, this); - - if (log.isInfoEnabled()) - log.info("Installed handler: " + signal + ", oldHandler=" - + this.oldHandler); - - } - - @SuppressWarnings("all") // Signal is in the sun namespace - public void handle(final Signal sig) { - - log.warn("Processing signal: " + sig); - - try { - - final AbstractServicesManagerService service = (AbstractServicesManagerService) impl; - - if (service != null) { - - service - .sighup(true/* pushConfig */, true/*restartServices*/); - - } - - /* - * This appears willing to halt the server so I am not chaining - * back to the previous handler! - */ - -// // Chain back to previous handler, if one exists -// if (oldHandler != SIG_DFL && oldHandler != SIG_IGN) { -// -// oldHandler.handle(sig); -// -// } - - } catch (Throwable t) { - - log.error("Signal handler failed : " + t, t); - - } - - } - - } - - /** * Starts and maintains services based on the specified configuration file * and/or an existing zookeeper ensemble. * Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/AbstractServer.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/AbstractServer.java 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/AbstractServer.java 2010-07-28 18:31:51 UTC (rev 3336) @@ -158,7 +158,7 @@ abstract public class AbstractServer implements Runnable, LeaseListener, ServiceIDListener { - final static protected Logger log = Logger.getLogger(AbstractServer.class); + final static private Logger log = Logger.getLogger(AbstractServer.class); // /** // * True iff the {@link #log} level is log.isInfoEnabled() or less. 
@@ -475,6 +475,13 @@ setSecurityManager(); + Thread.setDefaultUncaughtExceptionHandler( + new Thread.UncaughtExceptionHandler() { + public void uncaughtException(Thread t, Throwable e) { + log.warn("Uncaught exception in thread", e); + } + }); + /* * Read jini configuration & service properties */ @@ -1757,7 +1764,7 @@ try { - ((IService) tmp).destroy(); + tmp.destroy(); } catch (Throwable ex) { Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/LoadBalancerServer.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/LoadBalancerServer.java 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/LoadBalancerServer.java 2010-07-28 18:31:51 UTC (rev 3336) @@ -1,12 +1,10 @@ package com.bigdata.service.jini; -import java.io.File; import java.io.IOException; import java.io.StringWriter; import java.net.Inet4Address; import java.net.InetAddress; import java.net.UnknownHostException; -import java.rmi.Remote; import java.rmi.RemoteException; import java.rmi.server.ServerNotActiveException; import java.util.Collection; @@ -23,9 +21,6 @@ import org.apache.log4j.MDC; -import sun.misc.Signal; -import sun.misc.SignalHandler; - import com.bigdata.counters.CounterSet; import com.bigdata.counters.httpd.CounterSetHTTPD; import com.bigdata.journal.ITx; @@ -84,19 +79,6 @@ public LoadBalancerServer(final String[] args, final LifeCycle lifeCycle) { super(args, lifeCycle); - - try { - - /* - * Note: This signal is not supported under Windows. - */ - new SigHUPHandler("HUP"); - - } catch (IllegalArgumentException ex) { - - log.warn("Signal handler not installed: " + ex); - - } } /** @@ -153,80 +135,7 @@ return service; } - - /** - * SIGHUP Handler. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ - private class SigHUPHandler implements SignalHandler { - private final SignalHandler oldHandler; - - /** - * Install handler. - * - * @param signalName - * The signal name. - * - * @see http://www-128.ibm.com/developerworks/java/library/i-signalhandling/ - * - * @see http://forum.java.sun.com/thread.jspa?threadID=514860&messageID=2451429 - * for the use of {@link Runtime#addShutdownHook(Thread)}. - * - * @see http://twit88.com/blog/2008/02/06/java-signal-handling/ - */ - @SuppressWarnings("all") // Signal is in the sun namespace - protected SigHUPHandler(final String signalName) { - - final Signal signal = new Signal(signalName); - - this.oldHandler = Signal.handle(signal, this); - - if (log.isInfoEnabled()) - log.info("Installed handler: " + signal + ", oldHandler=" - + this.oldHandler); - - } - - @SuppressWarnings("all") // Signal is in the sun namespace - public void handle(final Signal sig) { - - log.warn("Processing signal: " + sig); - - try { - - final AdministrableLoadBalancer service = (AdministrableLoadBalancer) impl; - - if (service != null) { - - service.logCounters(); - - } - - /* - * This appears willing to halt the server so I am not chaining - * back to the previous handler! - */ - -// // Chain back to previous handler, if one exists -// if (oldHandler != SIG_DFL && oldHandler != SIG_IGN) { -// -// oldHandler.handle(sig); -// -// } - - } catch (Throwable t) { - - log.error("Signal handler failed : " + t, t); - - } - - } - - } - /** * Overrides the {@link IFederationDelegate} leave/join behavior to notify * the {@link LoadBalancerService}. @@ -250,6 +159,7 @@ /** * Notifies the {@link LoadBalancerService}. 
*/ + @Override public void serviceJoin(IService service, UUID serviceUUID) { try { @@ -279,6 +189,7 @@ /** * Notifies the {@link LoadBalancerService}. */ + @Override public void serviceLeave(UUID serviceUUID) { if (log.isInfoEnabled()) @@ -334,6 +245,7 @@ * root path), dump of the indices in the federation (/indices), and * events (/events). */ + @Override public AbstractHTTPD newHttpd(final int httpdPort, final CounterSet counterSet) throws IOException { @@ -451,6 +363,7 @@ final IndicesHandler indicesHandler = new IndicesHandler(); + @Override public Response doGet(String uri, String method, Properties header, LinkedHashMap<String, Vector<String>> parms) throws Exception { @@ -668,33 +581,5 @@ return s; } - - /** - * Logs the counters on a file created using - * {@link File#createTempFile(String, String, File)} in the log - * directory. - * - * @throws IOException - * - * @todo this method is not exposed to RMI (it is not on any - * {@link Remote} interface) but it could be. - */ - public void logCounters() throws IOException { - - if (isTransient) { - - log.warn("LBS is transient - request ignored."); - - return; - - } - - final File file = File.createTempFile("counters-hup", ".xml", logDir); - - super.logCounters(file); - - } - } - } Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/BroadcastSighup.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/BroadcastSighup.java 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/BroadcastSighup.java 2010-07-28 18:31:51 UTC (rev 3336) @@ -27,21 +27,35 @@ package com.bigdata.service.jini.util; +import com.bigdata.jini.lookup.entry.Hostname; +import java.io.IOException; +import java.net.UnknownHostException; import org.apache.log4j.Logger; import net.jini.config.ConfigurationException; import net.jini.core.lookup.ServiceItem; import com.bigdata.jini.start.IServicesManagerService; +import com.bigdata.service.ILoadBalancerService; import com.bigdata.service.jini.JiniClient; import com.bigdata.service.jini.JiniFederation; +import java.net.InetAddress; +import net.jini.config.Configuration; +import net.jini.core.entry.Entry; +import net.jini.core.lookup.ServiceTemplate; +import net.jini.lookup.ServiceItemFilter; /** - * Utility will broadcast the {@link IServicesManagerService#sighup()} method to - * all discovered {@link IServicesManagerService}s in federation to which it + * Utility will broadcast the + * {@link IServicesManagerService#sighup(boolean,boolean)} method or + * {@link ILoadBalancerService#sighup()} method to either local or + * all discovered {@link IServicesManagerService}s + * or {@link ILoadBalancerService}s in federation to which it * connects. Each discovered {@link IServicesManagerService} will push the * service configuration to zookeeper and then restart any processes for which * it has responsibility which are not currently running. + * Each discovered {@link ILoadBalancerService} will log current counters to + * files. * <p> * Note: If you are running a federation on a cluster, you can achieve the same * effect by changing the federation run state to <code>hup</code> and then @@ -64,6 +78,16 @@ * following options are defined: * <dl> * + * <dt>localOrRemote</dt> + * <dd>If "local", then consider only services running on the local host + * (similar to what linux "kill -hup" signal used to do). If + * "all" then call sighup() on all services found. 
</dd> + * + * <dt>signalTarget</dt> + * <dd>If "servicesManager", then send signals only to instances of + * IServicesManagerService. If "loadBalancer", then send signals only to + * instances of ILoadBalancerService. </dd> + * * <dt>discoveryDelay</dt> * <dd>The time in milliseconds to wait for service discovery before * proceeding.</dd> @@ -84,57 +108,111 @@ * @throws InterruptedException * @throws ConfigurationException */ - public static void main(final String[] args) throws InterruptedException, - ConfigurationException { + public static void main(final String[] args) { + try { + main2(args); + } catch (Exception e) { + e.printStackTrace(); + log.warn("Unexpected exception", e); + } + } - final JiniFederation fed = JiniClient.newInstance(args).connect(); + private static void main2(final String[] args) throws InterruptedException, + ConfigurationException, UnknownHostException, IOException { - final long discoveryDelay = (Long) fed - .getClient() - .getConfiguration() + // Get the configuration and set up the federation. + + final JiniClient client = JiniClient.newInstance(args); + final JiniFederation fed = client.connect(); + final Configuration config = client.getConfiguration(); + + final long discoveryDelay = (Long) config .getEntry(COMPONENT, "discoveryDelay", Long.TYPE, 5000L/* default */); - final boolean pushConfig = (Boolean) fed - .getClient() - .getConfiguration() + final boolean pushConfig = (Boolean) config .getEntry(COMPONENT, "pushConfig", Boolean.TYPE, true/* default */); - final boolean restartServices = (Boolean) fed.getClient() - .getConfiguration().getEntry(COMPONENT, "restartServices", + final String localOrAll = (String) config + .getEntry(COMPONENT, "localOrAll", String.class, "all"); + + final String signalTarget = (String) config + .getEntry(COMPONENT, "signalTarget", String.class, + "servicesManager"); + + final boolean restartServices = (Boolean) config + .getEntry(COMPONENT, "restartServices", Boolean.TYPE, true/* default */); + // Identify the bigdata interface associated with the service + // to which the signal will be delivered. + + Class iface = null; + if (signalTarget.equals("servicesManager")) { + iface = IServicesManagerService.class; + } else if (signalTarget.equals("loadBalancer")) { + iface = ILoadBalancerService.class; + } else { + log.warn("Unexpected target for signal: " + signalTarget); + System.exit(1); + } + + // Set up the service template and filter used to identify the service. + + final String hostname = InetAddress.getLocalHost() + .getCanonicalHostName().toString(); + ServiceTemplate template = new ServiceTemplate(null, + new Class[] { iface }, null); + ServiceItemFilter thisHostFilter = null; + if (localOrAll.equals("local")) { + thisHostFilter = new ServiceItemFilter() { + public boolean check(ServiceItem item) { + for (Entry entry : item.attributeSets) { + if (entry instanceof Hostname && + ((Hostname)entry).hostname.equals(hostname)) { + return true; + } + } + return false; + } + }; + } else if (!localOrAll.equals("all")) { + log.warn("Unexpected option for signal: " + localOrAll); + System.exit(1); + } + + // Use the federation's discovery manager to lookup bigdata + // services of interest. + System.out.println("Waiting " + discoveryDelay + "ms for service discovery."); + ServiceItem[] items = + fed.getServiceDiscoveryManager() + .lookup(template, Integer.MAX_VALUE, Integer.MAX_VALUE, + thisHostFilter, discoveryDelay); - Thread.sleep(discoveryDelay/* ms */); + // Call the service's appropriate interface method. 
- final ServiceItem[] a = fed.getServicesManagerClient() - .getServiceCache() - .getServiceItems(0/* maxCount */, null/* filter */); - int n = 0; - for (ServiceItem item : a) { - + for (ServiceItem item : items) { try { - - ((IServicesManagerService) item.service).sighup(pushConfig, - restartServices); - - n++; - - } catch(Throwable t) { - - log.warn(item, t); + if (signalTarget.equals("servicesManager")) { + ((IServicesManagerService) item.service) + .sighup(pushConfig, restartServices); + ++n; + } else if (signalTarget.equals("loadBalancer")) { + ((ILoadBalancerService) item.service).sighup(); + ++n; + + } else { + log.warn("Unexpected target for signal: " + signalTarget); + } + } catch (Exception e) { + e.printStackTrace(); + log.warn("Unexpected target for signal: " + signalTarget); } - } - - System.out.println("Signal sent to " + n + " of " + a.length - + " services managers."); - - System.exit(0); - + System.out.println("Signal sent to " + n + " of " + items.length + + " instances of " + signalTarget + "."); } - } Modified: trunk/src/resources/scripts/archiveRun.sh =================================================================== --- trunk/src/resources/scripts/archiveRun.sh 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/src/resources/scripts/archiveRun.sh 2010-07-28 18:31:51 UTC (rev 3336) @@ -19,7 +19,8 @@ exit 1 fi -source `dirname $0`/bigdataenv +BINDIR=`dirname $0` +source $BINDIR/bigdataenv targetDir=$1 @@ -29,21 +30,13 @@ mkdir -p $targetDir/counters mkdir -p $targetDir/indexDumps -# Look for the load balancer service directory on the local host. If -# we find it, then we read the pid for the LBS and send it a HUP signal -# so it will write a snapshot of its performance counters. +# Broadcast a HUP request to the load balancer in the federation so +# that it will write a snapshot of its performance counters. waitDur=60 -if [ -f "$lockFile" ]; then - read pid < `find $LAS -name pid | grep LoadBalancerServer` - if [ -z "$pid" ]; then - echo "Could not find LoadBalancer process: `hostname` LAS=$LAS." - else - echo "Sending HUP to the LoadBalancer: $pid" - kill -hup $pid - echo "Waiting $waitDur seconds for the performance counter dump." - sleep $waitDur - fi -fi +echo "Sending HUP to the LoadBalancer: $pid" +$BINDIR/broadcast_sighup local loadBalancer +echo "Waiting $waitDur seconds for the performance counter dump." +sleep $waitDur # Copy the configuration file and the various log files. cp -v $BIGDATA_CONFIG \ Modified: trunk/src/resources/scripts/bigdata =================================================================== --- trunk/src/resources/scripts/bigdata 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/src/resources/scripts/bigdata 2010-07-28 18:31:51 UTC (rev 3336) @@ -266,7 +266,7 @@ if [ -z "$pidno" ]; then echo $"`date` : `hostname` : process died? pid=$pid." else - kill -s hup $pid + ./broadcast_sighup local servicesManager echo $"`date` : `hostname` : sent SIGHUP pid=$pid." fi else Added: trunk/src/resources/scripts/broadcast_sighup =================================================================== --- trunk/src/resources/scripts/broadcast_sighup (rev 0) +++ trunk/src/resources/scripts/broadcast_sighup 2010-07-28 18:31:51 UTC (rev 3336) @@ -0,0 +1,63 @@ +#!/bin/bash + +## +# Script sends equivalent of sighup to a services manager or load balancer. 
+# + +usage() { + echo "Usage: $0 ( local | all ) ( servicesManager | loadBalancer )" 1>&2 + exit 1 +} + +if [ $# -ne 2 ] ; then + usage +fi +if [ X"$1" != "Xlocal" -a X"$1" != "Xall" ] ; then + usage +fi +local_or_all=$1 +if [ X"$2" != "XservicesManager" -a X"$2" != "XloadBalancer" ] ; then + usage +fi +target=$2 + +# Setup the environment. +cd `dirname $0` +source ./bigdataenv + +# Verify critical environment variables. +if [ -z "$JAVA_OPTS" ]; then + echo $"`date` : hostname : environment not setup." + exit 1; +fi +if [ -z "$CLASSPATH" ]; then + echo $"`date` : hostname : environment not setup." + exit 1; +fi +if [ -z "$BIGDATA_CONFIG" ]; then + echo $"`date` : hostname : environment not setup." + exit 1; +fi + +# Start the services manager on this host. +# +# Note: This explicitly specifies a small heap for the services manager since +# it uses very little heap space and we can avoid problems with contention for +# virtual memory by not permitting JVM defaults to grant this a large maximum +# heap on machines with lots of RAM. +# +# Allow JVM to be available for debugger to attach. +#NIC="lo" +#IP_ADDR=`ifconfig ${NIC} | sed -n -e s'/.*inet addr:\([0-9.]*\).*/\1/p'` +#JDWP_OPTS="transport=dt_socket,server=y,address=${IP_ADDR}:33340,suspend=y" +#JAVA_OPTS="-ea -Xdebug -Xrunjdwp:${JDWP_OPTS} ${JAVA_OPTS}" +java ${JAVA_OPTS} \ + -cp ${CLASSPATH} \ + com.bigdata.service.jini.util.BroadcastSighup \ + ${BIGDATA_CONFIG} \ + "com.bigdata.service.jini.util.BroadcastSighup.signalTarget=\"$target\"" \ + "com.bigdata.service.jini.util.BroadcastSighup.localOrAll=\"$local_or_all\"" \ + "com.bigdata.service.jini.util.BroadcastSighup.pushConfig=true" \ + "com.bigdata.service.jini.util.BroadcastSighup.restartServices=true" \ + ${BIGDATA_CONFIG_OVERRIDES} +exit 0 Modified: trunk/src/resources/scripts/extractCounters.sh =================================================================== --- trunk/src/resources/scripts/extractCounters.sh 2010-07-28 16:46:24 UTC (rev 3335) +++ trunk/src/resources/scripts/extractCounters.sh 2010-07-28 18:31:51 UTC (rev 3336) @@ -18,7 +18,8 @@ exit 1 fi -source `dirname $0`/bigdataenv +BINDIR=`dirname $0` +source $BINDIR/bigdataenv targetDir=$1 @@ -41,11 +42,8 @@ exit 1 fi -# -# Look for the load balancer service directory on the local host. If -# we find it, then we read the pid for the LBS and send it a HUP signal -# so it will write a snapshot of its performance counters. -# +# Broadcast a HUP request to the load balancer in the federation so +# that it will write a snapshot of its performance counters. # How long to wait for the LBS to dump a current snapshot. waitDur=60 @@ -54,13 +52,8 @@ tarball=$targetDir-output.tgz if [ -f "$lockFile" ]; then - read pid < "$lbsDir/pid" - if [ -z "$pid" ]; then - echo "Could not find LoadBalancer process: `hostname` lbsDir=$lbsDir" - exit 1 - fi echo "Sending HUP to the LoadBalancer: $pid" - kill -hup $pid + $BINDIR/broadcast_sighup local loadBalancer echo "Waiting $waitDur seconds for the performance counter dump." sleep $waitDur ant "-Danalysis.counters.dir=$lbsDir"\ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
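For readers following the BroadcastSighup change in the commit above, the following is a minimal sketch of the discovery-and-signal pattern that utility implements: look up every IServicesManagerService proxy in the federation and invoke sighup(pushConfig, restartServices) on each one. The class name, the fixed 5000 ms discovery delay, and the omission of the per-host Hostname filter (the "local" case) are illustrative simplifications, not part of the committed code, and error handling is reduced to whatever main() propagates.

import net.jini.core.lookup.ServiceItem;
import net.jini.core.lookup.ServiceTemplate;

import com.bigdata.jini.start.IServicesManagerService;
import com.bigdata.service.jini.JiniClient;
import com.bigdata.service.jini.JiniFederation;

public class SighupBroadcastSketch {

    public static void main(final String[] args) throws Exception {

        // Connect to the federation described by the configuration on the command line.
        final JiniFederation fed = JiniClient.newInstance(args).connect();

        // Match any service that implements IServicesManagerService.
        final ServiceTemplate template = new ServiceTemplate(null,
                new Class[] { IServicesManagerService.class }, null);

        // Wait up to 5s for discovery; no host filter, i.e. the "all" case.
        final ServiceItem[] items = fed.getServiceDiscoveryManager().lookup(
                template, Integer.MAX_VALUE, Integer.MAX_VALUE,
                null/* filter */, 5000L/* discoveryDelay ms */);

        int n = 0;
        for (ServiceItem item : items) {
            // Ask each services manager to push its configuration and restart
            // any processes it is responsible for that are not currently running.
            ((IServicesManagerService) item.service).sighup(
                    true/* pushConfig */, true/* restartServices */);
            n++;
        }

        System.out.println("Signal sent to " + n + " of " + items.length
                + " services managers.");
    }
}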
From: <btm...@us...> - 2010-07-28 21:14:34

Revision: 3338 http://bigdata.svn.sourceforge.net/bigdata/?rev=3338&view=rev Author: btmurphy Date: 2010-07-28 21:14:27 +0000 (Wed, 28 Jul 2010) Log Message: ----------- [trunk]: trac #126 - Uses of InetAddress.getLocalAddress should be changed to a mechanism that returns a non-loopback address Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java trunk/bigdata/src/java/com/bigdata/resources/MoveTask.java trunk/bigdata/src/java/com/bigdata/service/AbstractEmbeddedLoadBalancerService.java trunk/bigdata/src/test/com/bigdata/counters/httpd/TestCounterSetHTTPDServer.java trunk/bigdata/src/test/com/bigdata/service/TestResourceService.java trunk/bigdata/src/test/com/bigdata/test/ExperimentDriver.java trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/MaxClientServicesPerHostConstraint.java trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/MaxDataServicesPerHostConstraint.java trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/ZookeeperServerConfiguration.java trunk/bigdata-jini/src/java/com/bigdata/jini/start/process/JiniCoreServicesProcessHelper.java trunk/bigdata-jini/src/java/com/bigdata/jini/start/process/ZookeeperProcessHelper.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/LoadBalancerServer.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/BroadcastSighup.java trunk/bigdata-jini/src/java/com/bigdata/zookeeper/ZooHelper.java trunk/bigdata-jini/src/test/com/bigdata/jini/start/config/TestZookeeperServerEntry.java trunk/bigdata-jini/src/test/com/bigdata/service/jini/AbstractServerTestCase.java trunk/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TestMetrics.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/AbstractServerTestCase.java trunk/build.xml trunk/src/resources/bin/pstart Modified: trunk/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -48,6 +48,7 @@ import com.bigdata.counters.win.StatisticsCollectorForWindows; import com.bigdata.io.DirectBufferPool; import com.bigdata.rawstore.Bytes; +import com.bigdata.util.config.NicUtil; import com.bigdata.util.httpd.AbstractHTTPD; /** @@ -64,7 +65,6 @@ * and Un*x platforms so as to support the declared counters on all platforms. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ abstract public class AbstractStatisticsCollector implements IStatisticsCollector { @@ -83,32 +83,10 @@ String s; try { - -// hostname = InetAddress.getLocalHost().getHostName(); - - s = InetAddress.getLocalHost().getCanonicalHostName(); - - } catch (UnknownHostException e) { - - try { - - s = InetAddress.getLocalHost().getHostName(); - - } catch(UnknownHostException e2) { - - final String msg = "Could not resolve hostname"; - try { - log.error(msg); - } catch(Throwable t) { - System.err.println(msg); - } - - s = "localhost"; - - //throw new AssertionError(e); - - } - + s = NicUtil.getIpAddress("default.nic", "default", false); + } catch(Throwable t) {//for now, maintain same failure logic as used previously + t.printStackTrace(); + s = NicUtil.getIpAddressByLocalHost(); } fullyQualifiedHostName = s; @@ -520,7 +498,6 @@ * Options for {@link AbstractStatisticsCollector} * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public interface Options { Modified: trunk/bigdata/src/java/com/bigdata/resources/MoveTask.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/MoveTask.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata/src/java/com/bigdata/resources/MoveTask.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -63,6 +63,7 @@ import com.bigdata.service.IMetadataService; import com.bigdata.service.MetadataService; import com.bigdata.service.ResourceService; +import com.bigdata.util.config.NicUtil; /** * Task moves an index partition to another {@link IDataService}. @@ -155,7 +156,6 @@ * need to do the atomic update phase. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class MoveTask extends AbstractPrepareTask<MoveResult> { @@ -421,7 +421,6 @@ * successful. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * * @todo optimization to NOT send an empty index segment if there are no * buffered writes on the live journal. @@ -434,6 +433,7 @@ private final UUID targetDataServiceUUID; private final int targetIndexPartitionId; private final Event parentEvent; + private final InetAddress thisInetAddr; /** * @@ -481,6 +481,11 @@ this.targetIndexPartitionId = targetIndexPartitionId; this.parentEvent = parentEvent; + try { + this.thisInetAddr = InetAddress.getByName(NicUtil.getIpAddress("default.nic", "default", false)); + } catch(Throwable t) { + throw new IllegalArgumentException(t.getMessage(), t); + } } /** @@ -591,7 +596,7 @@ targetIndexPartitionId,// historicalWritesBuildResult.segmentMetadata,// bufferedWritesBuildResult.segmentMetadata,// - InetAddress.getLocalHost(),// + thisInetAddr, resourceManager .getResourceServicePort()// )).get(); @@ -856,7 +861,6 @@ * until the "receive" operation is complete. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ private static class IsIndexRegistered_UsingWriteService implements IIndexProcedure { @@ -896,7 +900,6 @@ * @see InnerReceiveIndexPartitionTask * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ protected static class ReceiveIndexPartitionTask extends DataServiceCallable<Void> { @@ -1039,7 +1042,6 @@ * source data service. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ private static class InnerReceiveIndexPartitionTask extends AbstractTask<Void> { Modified: trunk/bigdata/src/java/com/bigdata/service/AbstractEmbeddedLoadBalancerService.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/AbstractEmbeddedLoadBalancerService.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata/src/java/com/bigdata/service/AbstractEmbeddedLoadBalancerService.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -5,6 +5,8 @@ import java.util.Properties; import java.util.UUID; +import com.bigdata.util.config.NicUtil; + /** * Embedded {@link LoadBalancerService}. * @@ -14,7 +16,7 @@ abstract public class AbstractEmbeddedLoadBalancerService extends LoadBalancerService { // final private UUID serviceUUID; - final private String hostname; + private String hostname = NicUtil.getIpAddressByLocalHost();//for now, maintain the same failure logic as in constructor public AbstractEmbeddedLoadBalancerService(UUID serviceUUID, Properties properties) { @@ -28,17 +30,11 @@ setServiceUUID(serviceUUID); - String hostname; try { - - hostname = Inet4Address.getLocalHost().getCanonicalHostName(); - - } catch (UnknownHostException e) { - - hostname = "localhost"; - + this.hostname = NicUtil.getIpAddress("default.nic", "default", false); + } catch(Throwable t) { + t.printStackTrace(); } - this.hostname = hostname; } Modified: trunk/bigdata/src/test/com/bigdata/counters/httpd/TestCounterSetHTTPDServer.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/counters/httpd/TestCounterSetHTTPDServer.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata/src/test/com/bigdata/counters/httpd/TestCounterSetHTTPDServer.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -39,6 +39,7 @@ import com.bigdata.counters.Instrument; import com.bigdata.counters.OneShotInstrument; import com.bigdata.counters.PeriodEnum; +import com.bigdata.util.config.NicUtil; /** * Utility class for testing {@link CounterSetHTTPD} or @@ -62,12 +63,10 @@ CounterSet cset = root.makePath("localhost"); - cset.addCounter("hostname", new OneShotInstrument<String>( - InetAddress.getLocalHost().getHostName())); + String localIpAddr = NicUtil.getIpAddress("default.nic", "default", true); + cset.addCounter("hostname", new OneShotInstrument<String>(localIpAddr)); + cset.addCounter("ipaddr", new OneShotInstrument<String>(localIpAddr)); - cset.addCounter("ipaddr", new OneShotInstrument<String>(InetAddress - .getLocalHost().getHostAddress())); - // 60 minutes of data : @todo replace with CounterSetBTree (no fixed limit). 
final HistoryInstrument<Double> history1 = new HistoryInstrument<Double>( new History<Double>(new Double[60], PeriodEnum.Minutes Modified: trunk/bigdata/src/test/com/bigdata/service/TestResourceService.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/service/TestResourceService.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata/src/test/com/bigdata/service/TestResourceService.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -43,6 +43,7 @@ import com.bigdata.service.ResourceService.ReadResourceTask; import com.bigdata.util.concurrent.DaemonThreadFactory; +import com.bigdata.util.config.NicUtil; /** * Test verifies the ability to transmit a file using the @@ -103,14 +104,15 @@ }; + InetAddress thisInetAddr; + thisInetAddr = InetAddress.getByName(NicUtil.getIpAddress("default.nic", "default", true)); try { service.awaitRunning(100, TimeUnit.MILLISECONDS); assertTrue(service.isOpen()); - assertEquals(tmpFile, new ReadResourceTask(InetAddress - .getLocalHost(), service.port, allowedUUID, tmpFile).call()); + assertEquals(tmpFile, new ReadResourceTask(thisInetAddr, service.port, allowedUUID, tmpFile).call()); if (log.isInfoEnabled()) log.info(service.counters.getCounters()); @@ -182,6 +184,8 @@ final List<File> tempFiles = new LinkedList<File>(); + InetAddress thisInetAddr; + thisInetAddr = InetAddress.getByName(NicUtil.getIpAddress("default.nic", "default", true)); try { service.awaitRunning(100, TimeUnit.MILLISECONDS); @@ -197,8 +201,7 @@ tempFiles.add(tmpFile); - tasks.add(new ReadResourceTask(InetAddress.getLocalHost(), - service.port, allowedUUID, tmpFile)); + tasks.add(new ReadResourceTask(thisInetAddr, service.port, allowedUUID, tmpFile)); } Modified: trunk/bigdata/src/test/com/bigdata/test/ExperimentDriver.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/test/ExperimentDriver.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata/src/test/com/bigdata/test/ExperimentDriver.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -68,6 +68,7 @@ import com.bigdata.journal.ProxyTestCase; import com.bigdata.util.NV; +import com.bigdata.util.config.NicUtil; /** * A harness for running comparison of different configurations. @@ -1364,13 +1365,9 @@ props.setProperty("os.arch.cpus", ""+SystemUtil.numProcessors()); try { - - props.setProperty("host",InetAddress.getLocalHost().getHostName()); - - } catch(UnknownHostException ex) { - /* - * ignore. 
- */ + props.setProperty( "host", NicUtil.getIpAddress("default.nic", "default", true) ); + } catch(Throwable t) { + t.printStackTrace(); } return props; Modified: trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -78,6 +78,7 @@ import com.bigdata.service.jini.JiniClient; import com.bigdata.service.jini.JiniClientConfig; import com.bigdata.service.jini.JiniFederation; +import com.bigdata.util.config.NicUtil; import com.bigdata.zookeeper.ZNodeCreatedWatcher; /** @@ -130,6 +131,8 @@ public final Properties properties; public final String[] jiniOptions; + private final String serviceIpAddr; + protected void toString(StringBuilder sb) { super.toString(sb); @@ -175,6 +178,12 @@ } else { log.warn("groups = " + Arrays.toString(this.groups)); } + + try { + this.serviceIpAddr = NicUtil.getIpAddress("default.nic", "default", false); + } catch(IOException e) { + throw new ConfigurationException(e.getMessage(), e); + } } /** @@ -471,8 +480,7 @@ final ServiceDir serviceDir = new ServiceDir(this.serviceDir); - final Hostname hostName = new Hostname(InetAddress.getLocalHost() - .getCanonicalHostName().toString()); + final Hostname hostName = new Hostname(serviceIpAddr); final ServiceUUID serviceUUID = new ServiceUUID(this.serviceUUID); Modified: trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/MaxClientServicesPerHostConstraint.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/MaxClientServicesPerHostConstraint.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/MaxClientServicesPerHostConstraint.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -10,6 +10,7 @@ import com.bigdata.jini.lookup.entry.ServiceItemFilterChain; import com.bigdata.service.IClientService; import com.bigdata.service.jini.JiniFederation; +import com.bigdata.util.config.NicUtil; /** * Constraint on the #of {@link IClientService}s on the same host. @@ -51,11 +52,9 @@ // */ // filter.add(ClientServiceFilter.INSTANCE); - final String hostname = InetAddress.getLocalHost().getHostName(); + final String hostname = NicUtil.getIpAddress("default.nic", "default", false); + final String canonicalHostname = hostname; - final String canonicalHostname = InetAddress.getLocalHost() - .getCanonicalHostName(); - // filters for _this_ host. filter.add(new HostnameFilter(new Hostname[] {// new Hostname(hostname),// Modified: trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/MaxDataServicesPerHostConstraint.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/MaxDataServicesPerHostConstraint.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/MaxDataServicesPerHostConstraint.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -11,6 +11,7 @@ import com.bigdata.service.IDataService; import com.bigdata.service.jini.JiniFederation; import com.bigdata.service.jini.lookup.DataServiceFilter; +import com.bigdata.util.config.NicUtil; /** * Constraint on the #of {@link IDataService}s on the same host. 
@@ -47,11 +48,9 @@ // only consider data services. filter.add(DataServiceFilter.INSTANCE); - final String hostname = InetAddress.getLocalHost().getHostName(); + final String hostname = NicUtil.getIpAddress("default.nic", "default", false); + final String canonicalHostname = hostname; - final String canonicalHostname = InetAddress.getLocalHost() - .getCanonicalHostName(); - // filters for _this_ host. filter.add(new HostnameFilter(new Hostname[] {// new Hostname(hostname),// Modified: trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/ZookeeperServerConfiguration.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/ZookeeperServerConfiguration.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/java/com/bigdata/jini/start/config/ZookeeperServerConfiguration.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -54,6 +54,7 @@ import com.bigdata.io.FileLockUtility; import com.bigdata.jini.start.IServiceListener; import com.bigdata.jini.start.process.ZookeeperProcessHelper; +import com.bigdata.util.config.NicUtil; import com.bigdata.zookeeper.ZooHelper; /** @@ -213,6 +214,8 @@ */ public final Map<String, String> other; + private final InetAddress thisInetAddr; + /** * Adds value to {@link #other} if found in the {@link Configuration}. * @@ -318,6 +321,14 @@ putIfDefined(config, Options.FORCE_SYNC, Boolean.TYPE); putIfDefined(config, Options.SKIP_ACL, Boolean.TYPE); + try { + thisInetAddr = InetAddress.getByName(NicUtil.getIpAddress("default.nic", "default", false)); + } catch(IOException e) { + throw new ConfigurationException(e.getMessage(), e); + } + if (log.isInfoEnabled()) { + log.info("zookeeper host="+thisInetAddr.getCanonicalHostName()); + } } /** @@ -541,7 +552,7 @@ */ public V call() throws Exception { - if (ZooHelper.isRunning(InetAddress.getLocalHost(), clientPort)) { + if (ZooHelper.isRunning(thisInetAddr, clientPort)) { /* * Query for an instance already running on local host at that @@ -552,7 +563,7 @@ * instance on the localhost. */ - ZooHelper.ruok(InetAddress.getLocalHost(), clientPort); + ZooHelper.ruok(thisInetAddr, clientPort); throw new ZookeeperRunningException( "Zookeeper already running on localhost: clientport=" @@ -625,7 +636,7 @@ * Note: We don't test this until we have the file lock. */ - ZooHelper.ruok(InetAddress.getLocalHost(), clientPort); + ZooHelper.ruok(thisInetAddr, clientPort); throw new ZookeeperRunningException( "Zookeeper already running on localhost: clientport=" @@ -727,7 +738,7 @@ * clientPort. That could have already been true. */ - ZooHelper.ruok(InetAddress.getLocalHost(), clientPort); + ZooHelper.ruok(thisInetAddr, clientPort); // adjust for time remaining. nanos = (System.nanoTime() - begin); Modified: trunk/bigdata-jini/src/java/com/bigdata/jini/start/process/JiniCoreServicesProcessHelper.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/jini/start/process/JiniCoreServicesProcessHelper.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/java/com/bigdata/jini/start/process/JiniCoreServicesProcessHelper.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -42,6 +42,7 @@ import com.bigdata.service.jini.JiniClientConfig; import com.bigdata.service.jini.util.JiniServicesHelper; import com.bigdata.service.jini.util.LookupStarter; +import com.bigdata.util.config.NicUtil; /** * Class for starting the jini services. 
@@ -169,7 +170,7 @@ */ if (log.isInfoEnabled()) - log.info("Will start instance: " + InetAddress.getLocalHost() + log.info("Will start instance: " + NicUtil.getIpAddress("default.nic", "default", false) + ", config=" + config); final JiniCoreServicesStarter<JiniCoreServicesProcessHelper> serviceStarter = serviceConfig Modified: trunk/bigdata-jini/src/java/com/bigdata/jini/start/process/ZookeeperProcessHelper.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/jini/start/process/ZookeeperProcessHelper.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/java/com/bigdata/jini/start/process/ZookeeperProcessHelper.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -16,6 +16,7 @@ import com.bigdata.jini.start.config.ZookeeperServerConfiguration; import com.bigdata.jini.start.config.ZookeeperServerEntry; import com.bigdata.jini.start.config.ZookeeperServerConfiguration.ZookeeperRunningException; +import com.bigdata.util.config.NicUtil; import com.bigdata.zookeeper.ZooHelper; /** @@ -40,6 +41,14 @@ protected final int clientPort; + protected static InetAddress thisInetAddr = null; + static { + try { + thisInetAddr = InetAddress.getByName + (NicUtil.getIpAddress("default.nic", "default", false)); + } catch (Throwable t) { /* swallow */ } + } + /** * @param name * @param builder @@ -144,11 +153,11 @@ final ZookeeperServerConfiguration serverConfig = new ZookeeperServerConfiguration( config); - if (ZooHelper.isRunning(InetAddress.getLocalHost(), serverConfig.clientPort)) { + if (ZooHelper.isRunning(thisInetAddr, serverConfig.clientPort)) { if (log.isInfoEnabled()) log.info("Zookeeper already running: " - + InetAddress.getLocalHost().getCanonicalHostName() + + thisInetAddr.getCanonicalHostName() + ":" + serverConfig.clientPort); // will not consider start. Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/LoadBalancerServer.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/LoadBalancerServer.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/LoadBalancerServer.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -31,6 +31,7 @@ import com.bigdata.service.jini.util.DumpFederation; import com.bigdata.service.jini.util.DumpFederation.FormatRecord; import com.bigdata.service.jini.util.DumpFederation.FormatTabTable; +import com.bigdata.util.config.NicUtil; import com.bigdata.util.httpd.AbstractHTTPD; import com.bigdata.util.httpd.NanoHTTPD; import com.bigdata.util.httpd.NanoHTTPD.Response; @@ -547,15 +548,10 @@ * This exception gets thrown if the client has made a direct * (vs RMI) call. 
*/ - try { - - clientAddr = Inet4Address.getLocalHost(); - - } catch (UnknownHostException ex) { - - return "localhost"; - + clientAddr = InetAddress.getByName(NicUtil.getIpAddress("default.nic", "default", false)); + } catch(Throwable t) {//for now, maintain the same failure logic as used previously + return NicUtil.getIpAddressByLocalHost(); } } Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/BroadcastSighup.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/BroadcastSighup.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/BroadcastSighup.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -39,6 +39,7 @@ import com.bigdata.service.ILoadBalancerService; import com.bigdata.service.jini.JiniClient; import com.bigdata.service.jini.JiniFederation; +import com.bigdata.util.config.NicUtil; import java.net.InetAddress; import net.jini.config.Configuration; import net.jini.core.entry.Entry; @@ -158,8 +159,8 @@ // Set up the service template and filter used to identify the service. - final String hostname = InetAddress.getLocalHost() - .getCanonicalHostName().toString(); + final String hostname = + NicUtil.getIpAddress("default.nic", "default", false); ServiceTemplate template = new ServiceTemplate(null, new Class[] { iface }, null); ServiceItemFilter thisHostFilter = null; Modified: trunk/bigdata-jini/src/java/com/bigdata/zookeeper/ZooHelper.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/zookeeper/ZooHelper.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/java/com/bigdata/zookeeper/ZooHelper.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -42,6 +42,7 @@ import org.apache.zookeeper.KeeperException.NoNodeException; import com.bigdata.jini.start.config.AbstractHostConstraint; +import com.bigdata.util.config.NicUtil; /** * Utility class for issuing the four letter commands to a zookeeper service. @@ -53,6 +54,14 @@ protected static final Logger log = Logger.getLogger(ZooHelper.class); + private static InetAddress thisInetAddr = null; + static { + try { + thisInetAddr = InetAddress.getByName + (NicUtil.getIpAddress("default.nic", "default", false)); + } catch (Throwable t) { /* swallow */ } + } + /** * Inquires whether a zookeeper instance is running in a non-error state and * returns iff the service reports "imok". 
@@ -129,7 +138,7 @@ if (log.isInfoEnabled()) log.info("Killing service: @ port=" + clientPort); - final Socket socket = new Socket(InetAddress.getLocalHost(), clientPort); + final Socket socket = new Socket(thisInetAddr, clientPort); try { @@ -180,7 +189,7 @@ if (log.isInfoEnabled()) log.info("hostname=" + addr + ", port=" + clientPort); - final Socket socket = new Socket(InetAddress.getLocalHost(), clientPort); + final Socket socket = new Socket(thisInetAddr, clientPort); try { @@ -251,7 +260,7 @@ if (log.isInfoEnabled()) log.info("hostname=" + addr + ", port=" + clientPort); - final Socket socket = new Socket(InetAddress.getLocalHost(), clientPort); + final Socket socket = new Socket(thisInetAddr, clientPort); try { Modified: trunk/bigdata-jini/src/test/com/bigdata/jini/start/config/TestZookeeperServerEntry.java =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/jini/start/config/TestZookeeperServerEntry.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/test/com/bigdata/jini/start/config/TestZookeeperServerEntry.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -27,6 +27,7 @@ package com.bigdata.jini.start.config; +import java.io.IOException; import java.net.InetAddress; import java.net.SocketException; import java.net.UnknownHostException; @@ -34,6 +35,8 @@ import junit.framework.TestCase2; import net.jini.config.ConfigurationException; +import com.bigdata.util.config.NicUtil; + /** * Unit tests for the {@link ZookeeperServerEntry}. * @@ -97,9 +100,9 @@ * @throws SocketException * @throws UnknownHostException */ - public void test002() throws ConfigurationException, SocketException, UnknownHostException { + public void test002() throws ConfigurationException, SocketException, UnknownHostException, IOException { - final String server = InetAddress.getLocalHost().getCanonicalHostName(); + final String server = NicUtil.getIpAddress("default.nic", "default", true); final String[] hosts = new String[] { "127.0.0.1", Modified: trunk/bigdata-jini/src/test/com/bigdata/service/jini/AbstractServerTestCase.java =================================================================== --- trunk/bigdata-jini/src/test/com/bigdata/service/jini/AbstractServerTestCase.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-jini/src/test/com/bigdata/service/jini/AbstractServerTestCase.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -45,6 +45,8 @@ import com.bigdata.service.IDataService; import com.bigdata.service.MetadataService; import com.sun.jini.tool.ClassServer; +import com.bigdata.util.config.ConfigDeployUtil; +import com.bigdata.util.config.NicUtil; /** * Abstract base class for tests of remote services. @@ -366,8 +368,7 @@ */ // get the hostname. - InetAddress addr = InetAddress.getLocalHost(); - String hostname = addr.getHostName(); + String hostname = NicUtil.getIpAddress("default.nic", "default", true); // Find the service registrar (unicast protocol). final int timeout = 4*1000; // seconds. 
Modified: trunk/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java =================================================================== --- trunk/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -36,6 +36,7 @@ import java.util.concurrent.TimeUnit; import com.bigdata.util.concurrent.DaemonThreadFactory; +import com.bigdata.util.config.NicUtil; import edu.lehigh.swat.bench.ubt.api.QueryResult; import edu.lehigh.swat.bench.ubt.api.Repository; @@ -81,9 +82,10 @@ */ String hostname; try { - hostname = InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException ex) { - hostname = "localhost"; + hostname = NicUtil.getIpAddress("default.nic", "default", false); + } catch(Throwable t) {//for now, maintain same failure logic as used previously + t.printStackTrace(); + s = NicUtil.getIpAddressByLocalHost(); } QUERY_TEST_RESULT_FILE = hostname + "-result.txt"; } else { Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TestMetrics.java =================================================================== --- trunk/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TestMetrics.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TestMetrics.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -48,6 +48,7 @@ import com.bigdata.rdf.rio.LoadStats; import com.bigdata.rdf.store.DataLoader; import com.bigdata.rdf.store.DataLoader.ClosureEnum; +import com.bigdata.util.config.NicUtil; /** * Test harness for loading randomly generated files into a repository. @@ -588,7 +589,7 @@ * Write out the repositoryClass and all defined properties. */ // metricsWriter.write("repositoryClass, "+m_repo.getClass().getName()+"\n"); - metricsWriter.write("host, "+InetAddress.getLocalHost().getHostName()+"\n"); + metricsWriter.write("host, "+NicUtil.getIpAddress("default.nic", "default", true)+"\n"); if(true) { Map props = new TreeMap(PropertyUtil.flatten(getProperties())); Iterator itr = props.entrySet().iterator(); Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/AbstractServerTestCase.java =================================================================== --- trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/AbstractServerTestCase.java 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/AbstractServerTestCase.java 2010-07-28 21:14:27 UTC (rev 3338) @@ -50,6 +50,7 @@ import com.bigdata.service.IDataService; import com.bigdata.service.MetadataService; import com.bigdata.service.jini.AbstractServer; +import com.bigdata.util.config.NicUtil; import com.sun.jini.tool.ClassServer; /** @@ -371,8 +372,7 @@ */ // get the hostname. - InetAddress addr = InetAddress.getLocalHost(); - String hostname = addr.getHostName(); + String hostname = NicUtil.getIpAddress("default.nic", "default", true); // Find the service registrar (unicast protocol). final int timeout = 4*1000; // seconds. 
Modified: trunk/build.xml =================================================================== --- trunk/build.xml 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/build.xml 2010-07-28 21:14:27 UTC (rev 3338) @@ -1927,6 +1927,8 @@ </echo> <echo> policy: ${java.security.policy} </echo> + <echo> default.nic: ${default.nic} +</echo> <echo> hostname: ${this.hostname} </echo> <echo> preferIPv4: ${java.net.preferIPv4Stack} @@ -1989,6 +1991,7 @@ <sysproperty key="log4j.path" value="${bigdata.test.log4j.abs.path}"/> <sysproperty key="app.home" value="${app.home}"/> + <sysproperty key="default.nic" value="${default.nic}"/> <sysproperty key="classserver.jar" value="${dist.lib}/classserver.jar" /> <sysproperty key="colt.jar" value="${dist.lib}/colt.jar" /> Modified: trunk/src/resources/bin/pstart =================================================================== --- trunk/src/resources/bin/pstart 2010-07-28 20:47:53 UTC (rev 3337) +++ trunk/src/resources/bin/pstart 2010-07-28 21:14:27 UTC (rev 3338) @@ -1,6 +1,6 @@ #!/usr/bin/python -import os, sys, socket, getopt, fcntl, struct +import os, sys, socket, getopt, struct from os.path import dirname @@ -47,13 +47,17 @@ The path to the java.util.logging configuration file exportNic=<interfacename> - Specifies the name of the network interface on which the Jini service - will be exported. This property takes precedence over exportHost. - The default value is "eth0". + Specifies the name of the network interface to use by default + for service export and remote communication. This property + takes precedence over exportHost. The default value for this + property is "eth0". exportHost=<ipaddress|hostname> - Specifies the IP address on which the Jini service(s) will be exported. - This property is not used unless exportNic is "". + Specifies the IP address or host name to use when exporting + services for remote communication. This property will be + employed only when the value of the exportNic property + is set to the empty string ("") or a value that does not + correspond to any of the network interfaces on the system. bigdata.codebase.host=<interfacename|ipaddress|hostname> Specifies the network address of the codebase HTTP server. If the @@ -71,26 +75,6 @@ print "Services: \n " + ", ".join(serviceNames) -def get_interface_ipaddress(ifname): - """Gets the IP address assigned to a network interface. - - Parameters: - ifname The name of he interface whose IP address to get - - Returns: - A string contain the IP address of the interface - """ - if sys.platform == "linux2": - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - return socket.inet_ntoa(fcntl.ioctl( - s.fileno(), - 0x8915, # SIOCGIFADDR - struct.pack('256s', ifname[:15]) - )[20:24]) - else: - raise NotImplementedError("Not implemented for " + sys.platform) - - class Params: def __init__(self): self.serviceName = None @@ -109,9 +93,7 @@ # The name of the service self.serviceName = serviceName - # appHome is the base directory of the Java applications. In the - # source tree this is the top-level "java" directory. This will - # always be valid. + # appHome is the base directory of the application being started. 
self.appHome = params.appHome # A dictionary containing all of the Java system properties to set @@ -155,8 +137,8 @@ self.properties['bigdata.codebase.host'] = "eth0" self.properties['bigdata.codebase.port'] = "8081" - self.properties['log4j.configuration'] = \ - "src/resources/config/log4j.properties" + self.properties['log4j.configuration'] = os.path.join( \ + self.appHome, "var", "config", "logging", "log4j.properties") self.properties['log4j.primary.configuration'] = os.path.join( \ self.appHome, "var", "config", "logging", \ serviceName + "-logging.properties") @@ -170,17 +152,41 @@ """Using the params.groups, params.mGroups, and params.lookupLocators lists, create Java system properties appropriate for the service to convey it the information.""" - if len(params.groups) > 0: - self.properties['groupsToJoin'] = \ - "{\"" + "\",\"".join(params.groups) + "\"}" + if sys.platform == "win32": + if len(params.groups) > 0: + self.properties['groupsToJoin'] = \ + "\"" + "{" + \ + "\\" + \ + "\"" + \ + "\\\",\\\"".join(params.groups) + \ + "\\" + \ + "\"" + \ + "}" + "\"" + else: + self.properties['groupsToJoin'] = "\"" + "{}" + "\"" + if len(params.lookupLocators) > 0: + self.properties['locsToJoin'] = \ + "\"" + "{" + \ + "new LookupLocator(\\\"" + \ + "\\\"),new LookupLocator(\\\"".join(params.lookupLocators) + \ + "\\" + \ + "\"" + \ + ")" + \ + "}" + "\"" + else: + self.properties['locsToJoin'] = "\"" + "{}" + "\"" else: - self.properties['groupsToJoin'] = "{}" - if len(params.lookupLocators) > 0: - self.properties['locsToJoin'] = "{new LookupLocator(\"" + \ - "\"),new LookupLocator(\"".join(params.lookupLocators) + \ - "\")}" - else: - self.properties['locsToJoin'] = "{}" + if len(params.groups) > 0: + self.properties['groupsToJoin'] = \ + "{\"" + "\",\"".join(params.groups) + "\"}" + else: + self.properties['groupsToJoin'] = "{}" + if len(params.lookupLocators) > 0: + self.properties['locsToJoin'] = "{new LookupLocator(\"" + \ + "\"),new LookupLocator(\"".join(params.lookupLocators) + \ + "\")}" + else: + self.properties['locsToJoin'] = "{}" def setProperties(self, propList): for k, v in propList: @@ -222,7 +228,7 @@ self.migrateProperties() # Build the list of command line arguments - argList = [ "java", "-cp", ":".join(starterClasspath) ] + argList = [ "java", "-cp", os.pathsep.join(starterClasspath) ] propNames = self.properties.keys() propNames.sort() for p in propNames: @@ -281,22 +287,50 @@ # Construct the ServiceStarter Service Descriptor list if params.startCodebaseHttpd and serviceName != "httpd": - self.serviceStarterArgs = [ - "com.sun.jini.start.serviceDescriptors = " + \ - "new ServiceDescriptor[] { httpdDescriptor, " + \ - serviceName + "Descriptor }" ] + if sys.platform == "win32": + self.serviceStarterArgs = [ + "\"" + \ + "com.sun.jini.start.serviceDescriptors = " + \ + "new ServiceDescriptor[] { httpdDescriptor, " + \ + serviceName + "Descriptor }" + \ + "\"" ] + else: + self.serviceStarterArgs = [ + "com.sun.jini.start.serviceDescriptors = " + \ + "new ServiceDescriptor[] { httpdDescriptor, " + \ + serviceName + "Descriptor }" ] else: - self.serviceStarterArgs = [ - "com.sun.jini.start.serviceDescriptors = " + \ - "new ServiceDescriptor[] { " + serviceName + "Descriptor }" ] + if sys.platform == "win32": + self.serviceStarterArgs = [ + "\"" + \ + "com.sun.jini.start.serviceDescriptors = " + \ + "new ServiceDescriptor[] { " + serviceName + "Descriptor }" + \ + "\"" ] + else: + self.serviceStarterArgs = [ + "com.sun.jini.start.serviceDescriptors = " + \ + "new 
ServiceDescriptor[] { " + serviceName + "Descriptor }" ] def setGroupProperties(self): BigdataServiceBase.setGroupProperties(self) - if len(params.mGroups) > 0: - self.properties['memberGroups'] = \ - "{\"" + "\",\"".join(params.mGroups) + "\"}" + if sys.platform == "win32": + if len(params.mGroups) > 0: + self.properties['memberGroups'] = \ + "\"" + "{" + \ + "\\" + \ + "\"" + \ + "\\\",\\\"".join(params.mGroups) + \ + "\\" + \ + "\"" + \ + "}" + "\"" + else: + self.properties['memberGroups'] = "\"" + "{}" + "\"" else: - self.properties['memberGroups'] = "{}" + if len(params.mGroups) > 0: + self.properties['memberGroups'] = \ + "{\"" + "\",\"".join(params.mGroups) + "\"}" + else: + self.properties['memberGroups'] = "{}" class BigdataMetaservice(BigdataService): @@ -328,10 +362,18 @@ break # Construct the ServiceStarter Service Descriptor list - self.serviceStarterArgs = [ - "com.sun.jini.start.serviceDescriptors = " + \ - "new ServiceDescriptor[] {" + \ - "Descriptor, ".join(services) + "Descriptor}" ] + if sys.platform == "win32": + self.serviceStarterArgs = [ + "\"" + + "com.sun.jini.start.serviceDescriptors = " + \ + "new ServiceDescriptor[] {" + \ + "Descriptor, ".join(services) + "Descriptor}" + \ + "\"" ] + else: + self.serviceStarterArgs = [ + "com.sun.jini.start.serviceDescriptors = " + \ + "new ServiceDescriptor[] {" + \ + "Descriptor, ".join(services) + "Descriptor}" ] class BigdataServiceOldLog(BigdataService): @@ -420,7 +462,7 @@ # BTM - params.appHome = dirname(dirname( os.path.abspath(sys.argv[0])) ) - print "appHome=" + params.appHome + # print "appHome=" + params.appHome # Instiantate the object for the service serviceName = args[0] This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
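The conversion applied throughout the trac #126 commit above follows the same pattern at every call site: prefer NicUtil.getIpAddress("default.nic", "default", false) to obtain a reachable non-loopback IPv4 address, and fall back to NicUtil.getIpAddressByLocalHost() only if that lookup fails, preserving the failure behavior of the old InetAddress.getLocalHost() code. A minimal, self-contained sketch of that pattern follows; the class name is illustrative only.

import com.bigdata.util.config.NicUtil;

public class HostAddressSketch {

    public static void main(final String[] args) {

        String addr;
        try {
            // Preferred: a reachable, non-loopback IPv4 address chosen by NicUtil.
            addr = NicUtil.getIpAddress("default.nic", "default", false/* loopbackOk */);
        } catch (Throwable t) {
            // For now, maintain the same failure logic as the converted call sites:
            // report the problem and fall back to the local host address.
            t.printStackTrace();
            addr = NicUtil.getIpAddressByLocalHost();
        }

        System.out.println("Using host address: " + addr);
    }
}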
From: <btm...@us...> - 2010-07-29 17:44:07
Revision: 3351 http://bigdata.svn.sourceforge.net/bigdata/?rev=3351&view=rev Author: btmurphy Date: 2010-07-29 17:44:01 +0000 (Thu, 29 Jul 2010) Log Message: ----------- [trunk]: modified browser and disco-tool to fallback to a reasonable default nic on failure to support running these tools on windows Modified Paths: -------------- trunk/bigdata-jini/src/java/com/bigdata/disco/config/disco.config trunk/src/resources/bin/config/browser.config trunk/src/resources/bin/config/reggie.config trunk/src/resources/bin/config/serviceStarter.config trunk/src/resources/bin/config/zookeeper.config trunk/src/resources/bin/disco-tool trunk/src/resources/bin/pstart trunk/src/resources/config/jini/reggie.config Modified: trunk/bigdata-jini/src/java/com/bigdata/disco/config/disco.config =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/disco/config/disco.config 2010-07-29 17:30:26 UTC (rev 3350) +++ trunk/bigdata-jini/src/java/com/bigdata/disco/config/disco.config 2010-07-29 17:44:01 UTC (rev 3351) @@ -18,8 +18,8 @@ com.bigdata.disco.DiscoveryTool { - private static exportIpAddr = - NicUtil.getIpAddress("${exportNic}", 0, "${exportHost}"); + private static exportIpAddr = + NicUtil.getIpAddress("default.nic", "default", true); lookupLocatorConstraints = null; Modified: trunk/src/resources/bin/config/browser.config =================================================================== --- trunk/src/resources/bin/config/browser.config 2010-07-29 17:30:26 UTC (rev 3350) +++ trunk/src/resources/bin/config/browser.config 2010-07-29 17:44:01 UTC (rev 3351) @@ -9,12 +9,15 @@ import com.bigdata.util.config.NicUtil; com.sun.jini.example.browser { - private static exportIpAddr = NicUtil.getIpAddress("${exportNic}",0,"${exportHost}",true); + + private static exportIpAddr = + NicUtil.getIpAddress("default.nic", "default", true); + private static serverILFactory = new BasicILFactory( new BasicMethodConstraints( new InvocationConstraints( - new ConnectionRelativeTime(10000), null)), + new ConnectionRelativeTime(10000L), null)), null); listenerExporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(exportIpAddr,0), @@ -22,9 +25,17 @@ false, true); -// initialLookupGroups = new String[] { }; - initialLookupGroups = new String[] { System.getProperty("user.name") + "InstallVerifyGroup" }; + initialLookupGroups = new String[] { }; exitActionListener = new com.sun.jini.example.browser.Browser.Exit(); + + uninterestingInterfaces = + new String[] { "java.io.Serializable", + "java.rmi.Remote", + "net.jini.admin.Administrable", + "net.jini.core.constraint.RemoteMethodControl", + "net.jini.id.ReferentUuid", + "com.bigdata.service.EventReceivingService" + }; } net.jini.discovery.LookupDiscovery { Modified: trunk/src/resources/bin/config/reggie.config =================================================================== --- trunk/src/resources/bin/config/reggie.config 2010-07-29 17:30:26 UTC (rev 3350) +++ trunk/src/resources/bin/config/reggie.config 2010-07-29 17:44:01 UTC (rev 3351) @@ -14,15 +14,17 @@ com.sun.jini.reggie { - private static exportNic = "${exportNic}"; - private static exportIpAddr = NicUtil.getIpAddress("${exportNic}",0,"${exportHost}",true); - private static exportPort = Integer.parseInt("${exportPort}"); + private static exportIpAddr = + NicUtil.getIpAddress("default.nic", "default", false); + private static exportPort = + Integer.parseInt( System.getProperty("exportPort", "0") ); private static serverILFactory = new BasicILFactory( new 
BasicMethodConstraints( new InvocationConstraints( - new ConnectionRelativeTime(10000), null)), + new ConnectionRelativeTime(10000L), + null)), null); serverExporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(exportIpAddr,exportPort), @@ -30,11 +32,13 @@ false, true); - initialMemberGroups = new String[] { System.getProperty("user.name") + "InstallVerifyGroup" }; + initialMemberGroups = new String[] { System.getProperty("initialMemberGroups", System.getProperty("user.name")+"InstallVerifyGroup" ) }; + initialLookupGroups = initialMemberGroups; + initialLookupLocators = new LookupLocator[] { }; unicastDiscoveryHost = exportIpAddr; multicastInterfaces = new NetworkInterface[] { - NicUtil.getNetworkInterface(exportNic) + NicUtil.getNetworkInterface(exportIpAddr) }; minMaxServiceLease = 60000L; @@ -43,6 +47,6 @@ net.jini.discovery.LookupDiscovery { multicastRequestHost = com.sun.jini.reggie.exportIpAddr; multicastInterfaces = new NetworkInterface[] { - NicUtil.getNetworkInterface(com.sun.jini.reggie.exportNic) + NicUtil.getNetworkInterface(com.sun.jini.reggie.exportIpAddr) }; } Modified: trunk/src/resources/bin/config/serviceStarter.config =================================================================== --- trunk/src/resources/bin/config/serviceStarter.config 2010-07-29 17:30:26 UTC (rev 3350) +++ trunk/src/resources/bin/config/serviceStarter.config 2010-07-29 17:44:01 UTC (rev 3351) @@ -5,6 +5,7 @@ * well as the Jini lookup service and browser. */ import com.bigdata.util.config.ConfigurationUtil; +import com.bigdata.util.config.NicUtil; import com.sun.jini.config.ConfigUtil; import com.sun.jini.start.NonActivatableServiceDescriptor; @@ -13,16 +14,21 @@ com.sun.jini.start { + private static codebaseHost = + NicUtil.getIpAddress("bigdata.codebase.host", "default", false); + private static codebasePort = + Integer.parseInt( System.getProperty("bigdata.codebase.port", "0") ); + private static codebaseRootDir = + System.getProperty("bigdata.codebase.rootdDir", "." ); + private static jskCodebase = ConfigurationUtil.computeCodebase - ( "${bigdata.codebase.host}", - "jsk-dl.jar", Integer.parseInt("${bigdata.codebase.port}"), - "${bigdata.codebase.rootDir}", "none" ); + ( codebaseHost, "jsk-dl.jar", + codebasePort, codebaseRootDir, "none" ); private static servicePolicyFile = "${appHome}${/}var${/}config${/}policy${/}service.policy"; - // For starting HTTP codebase class server private static httpdCodebase = ""; private static httpdPolicyFile = servicePolicyFile; @@ -39,15 +45,17 @@ httpdCodebase, httpdPolicyFile, httpdClasspath, httpdImplName, httpdArgsArray); - // For starting a zookeeper server - // - // It is expected that all zookeeper-specific code will be - // included in the classpath (zookeeper.jar), as part of the - // service platform, rather than being downloaded. Instead, + // It is expected that all service-specific code will be + // included in the classpath of the services being started + // (for example, bigdata.jar and zookeeper.jar), as part of + // the service platform, rather than being downloaded. Instead, // because bigdata is run with a class server serving the - // downloadable jini classes, the zookeeper codebase is set - // to include only the jini-specific downloaded classes. + // downloadable jini classes, the service codebases set below + // are defined to include only the jini-specific downloaded + // classes. 
+ // For starting a zookeeper server (from the zookeeper codebase) + private static zookeeperCodebase = jskCodebase; private static zookeeperClasspath = "${appHome}${/}lib${/}zookeeper.jar"; @@ -70,9 +78,8 @@ // For starting a lookup service private static reggieServerCodebase = ConfigurationUtil.computeCodebase - ( "${bigdata.codebase.host}", - "reggie-dl.jar", Integer.parseInt("${bigdata.codebase.port}"), - "${bigdata.codebase.rootDir}", "none" ); + ( codebaseHost, "reggie-dl.jar", + codebasePort, codebaseRootDir, "none" ); private static reggieCodebase = ConfigUtil.concat( new String[] { reggieServerCodebase, " ", jskCodebase } ); @@ -98,9 +105,8 @@ // For starting a Jini browser private static browserServerCodebase = ConfigurationUtil.computeCodebase - ( "${bigdata.codebase.host}", - "browser-dl.jar", Integer.parseInt("${bigdata.codebase.port}"), - "${bigdata.codebase.rootDir}", "none" ); + ( codebaseHost, "browser-dl.jar", + codebasePort, codebaseRootDir, "none" ); private static browserCodebase = ConfigUtil.concat( new String[] { browserServerCodebase, " ", jskCodebase } ); Modified: trunk/src/resources/bin/config/zookeeper.config =================================================================== --- trunk/src/resources/bin/config/zookeeper.config 2010-07-29 17:30:26 UTC (rev 3350) +++ trunk/src/resources/bin/config/zookeeper.config 2010-07-29 17:44:01 UTC (rev 3351) @@ -1,4 +1,12 @@ - +/* Configuration file for the Zookeeper wrapper service; + * where the wrapper service implementation is provided + * in the Hadoop Zookeeper codebase, and belongs to the + * org.apache.zookeeper.server.quorum namespace. + * + * Note that such a wrapper service implementation has + * not yet been released as part of the Hadoop Zookeeper + * codebase. + */ import java.net.NetworkInterface; import com.sun.jini.config.ConfigUtil; @@ -11,17 +19,17 @@ import net.jini.core.discovery.LookupLocator; import net.jini.discovery.LookupDiscoveryManager; +import com.bigdata.util.config.ConfigDeployUtil; import com.bigdata.util.config.NicUtil; org.apache.zookeeper.server.quorum { - private static exportNic = "${exportNic}"; private static exportIpAddr = - NicUtil.getIpAddress("${exportNic}",0,"${exportHost}",true); - private static exportPort = Integer.parseInt("${exportPort}"); + NicUtil.getIpAddress("default.nic", "default", false); + private static exportPort = + Integer.parseInt( System.getProperty("exportPort", "0") ); - private static groupsToJoin = - new String[] { System.getProperty("user.name") + "InstallVerifyGroup" }; + private static groupsToJoin = new String[] { System.getProperty("groupsToJoin", System.getProperty("user.name")+"InstallVerifyGroup" ) }; private static locatorsToJoin = new LookupLocator[] { }; private static exporterTcpServerEndpoint = @@ -30,7 +38,8 @@ new BasicILFactory( new BasicMethodConstraints( new InvocationConstraints( - new ConnectionRelativeTime(10000), null)), + new ConnectionRelativeTime(10000L), + null)), null); serverExporter = @@ -45,7 +54,10 @@ null, this); // Where service state is persisted - persistenceDirectory = "${appHome}${/}var${/}state${/}zookeeper"; + persistenceDirectory = + ConfigUtil.concat + ( new String[] { System.getProperty("app.home", "${user.dir}"), + "${/}var${/}state${/}zookeeperState" } ); zookeeperDataDir = "data"; zookeeperDataLogDir = "data.log"; @@ -58,19 +70,20 @@ // If standard zookeeper config is specified, // it will override jini config; for example, - //zookeeperConfigFile = 
"${user.home}${/}tmp${/}zookeeper${/}conf${/}test-zookeeper-q3.cfg"; + //zookeeperConfigFile = + // "${user.home}${/}tmp${/}zookeeper${/}conf${/}test-zookeeper-q3.cfg"; } net.jini.discovery.LookupDiscovery { multicastRequestHost = org.apache.zookeeper.server.quorum.exportIpAddr; multicastInterfaces = new NetworkInterface[] { - NicUtil.getNetworkInterface(org.apache.zookeeper.server.quorum.exportNic) + NicUtil.getNetworkInterface(org.apache.zookeeper.server.quorum.exportIpAddr) }; } net.jini.lookup.ServiceDiscoveryManager { - eventListenerExporter = new BasicJeriExporter - (org.apache.zookeeper.server.quorum.exporterTcpServerEndpoint, - org.apache.zookeeper.server.quorum.serverILFactory, - false, false); + eventListenerExporter = + new BasicJeriExporter + (org.apache.zookeeper.server.quorum.exporterTcpServerEndpoint, + org.apache.zookeeper.server.quorum.serverILFactory, false, false); } Modified: trunk/src/resources/bin/disco-tool =================================================================== --- trunk/src/resources/bin/disco-tool 2010-07-29 17:30:26 UTC (rev 3350) +++ trunk/src/resources/bin/disco-tool 2010-07-29 17:44:01 UTC (rev 3351) @@ -71,16 +71,22 @@ Prints this help message. Useful properties: - exportNic=<interfacename> + default.nic=<interfacename> Specifies the name of the network interface on which the ServiceDiscoveryManager's remote event listener will be exported. This - property takes precedence over entityExportHost. The default value is - "eth0". + property takes precedence over exportHost. The default behavior + regarding this property (in conjunction with the use of NicUtil + in the disco.config configuration file to retrieve the interface's + associated ip address) is to direct the configuration file to use + the IPv4 address of the first active network inteface that can + be found on the system. exportHost=<ipaddress|hostname> - Specifies the IP address on which the ServiceDiscoveryManager's remote - event listener will be exported. This property is not used unless - exportNic is "". + Specifies the IP address on which the ServiceDiscoveryManager's + remote event listener will be exported. This property will be + employed only when the value of the default.nic property + is set to the empty string ("") or a value that does not + correspond to any of the network interfaces on the system. Examples: Show information about all services, discovered through all @@ -158,7 +164,7 @@ java_props = { "java.security.manager": "", "java.net.preferIPv4Stack": "true", - "exportNic": "eth0", + "default.nic": "${default.nic}", "networkInterface": "all", "exportHost": socket.gethostname() } Modified: trunk/src/resources/bin/pstart =================================================================== --- trunk/src/resources/bin/pstart 2010-07-29 17:30:26 UTC (rev 3350) +++ trunk/src/resources/bin/pstart 2010-07-29 17:44:01 UTC (rev 3351) @@ -46,16 +46,20 @@ java.util.logging.config.file=/path/to/jini.logging The path to the java.util.logging configuration file - exportNic=<interfacename> + default.nic=<interfacename> Specifies the name of the network interface to use by default for service export and remote communication. This property - takes precedence over exportHost. The default value for this - property is "eth0". + takes precedence over exportHost. 
The default behavior + regarding this property (in conjunction with configuration + files that use NicUtil to retrieve the interface's associated + ip address) is to direct the configuration file to use the + IPv4 address of the first active network inteface that can + be found on the system. exportHost=<ipaddress|hostname> Specifies the IP address or host name to use when exporting services for remote communication. This property will be - employed only when the value of the exportNic property + employed only when the value of the default.nic property is set to the empty string ("") or a value that does not correspond to any of the network interfaces on the system. @@ -63,11 +67,15 @@ Specifies the network address of the codebase HTTP server. If the value is an interface name, the IP address assigned to that interface will be used. If the value is an IP address or hostname, that value - will be used directly. The default value is "eth0". + will be used directly. The default behavior regarding this property + (in conjunction with configuration files that use NicUtil to + retrieve the interface's associated ip address) is to direct the + configuration file to use the IPv4 address of the first active + network inteface that can be found on the system. bigdata.codebase.port=<n> The port number on <bigdata.codebase.host> on which the HTTP class - server is running. + server is listening. """ serviceNames = bigdataServiceMap.keys() @@ -131,10 +139,10 @@ self.properties['java.security.debug'] = "off" def setEntityProperties(self): - self.properties['exportNic'] = "eth0" + self.properties['default.nic'] = "${default.nic}" self.properties['exportHost'] = socket.gethostname() self.properties['exportPort'] = "0" - self.properties['bigdata.codebase.host'] = "eth0" + self.properties['bigdata.codebase.host'] = "${bigdata.codebase.host}" self.properties['bigdata.codebase.port'] = "8081" self.properties['log4j.configuration'] = os.path.join( \ @@ -460,8 +468,6 @@ if params.appHome == "NOT_SET": params.appHome = dirname(dirname( os.path.abspath(sys.argv[0])) ) - # BTM - params.appHome = dirname(dirname( os.path.abspath(sys.argv[0])) ) - # print "appHome=" + params.appHome # Instiantate the object for the service Modified: trunk/src/resources/config/jini/reggie.config =================================================================== --- trunk/src/resources/config/jini/reggie.config 2010-07-29 17:30:26 UTC (rev 3350) +++ trunk/src/resources/config/jini/reggie.config 2010-07-29 17:44:01 UTC (rev 3351) @@ -16,7 +16,6 @@ private static exportIpAddr = NicUtil.getIpAddress("default.nic", "default", false); - private static exportPort = Integer.parseInt( System.getProperty("exportPort", "0") ); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
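The net effect of the changes above is that the export address, port, and group names are now resolved from system properties with fallback defaults instead of a hard-coded "eth0" interface name. As a minimal illustration only (this class is not part of the commit; it simply mirrors the call patterns that appear in the updated configuration files, and it assumes NicUtil.getIpAddress and NicUtil.getNetworkInterface behave as they are used there), the same resolution logic could be exercised from a standalone Java program:

    import java.net.NetworkInterface;
    import java.util.Arrays;

    import com.bigdata.util.config.NicUtil;

    /**
     * Hypothetical example (not part of this commit); mirrors the
     * property-driven parameterization introduced by the diffs above.
     */
    public class ExportAddressExample {
        public static void main(String[] args) throws Exception {
            // Same call pattern as reggie.config / zookeeper.config: resolve the
            // export address from the "default.nic" property; per the help text
            // above, the default is the IPv4 address of the first active network
            // interface found on the system.
            String exportIpAddr = NicUtil.getIpAddress("default.nic", "default", false);

            // The multicast discovery interface is then looked up from that
            // address rather than from a literal interface name.
            NetworkInterface mcastNic = NicUtil.getNetworkInterface(exportIpAddr);

            // Port and group names are likewise overridable via -D system
            // properties, preserving the previous defaults when unset.
            int exportPort = Integer.parseInt(System.getProperty("exportPort", "0"));
            String[] groupsToJoin = new String[] {
                System.getProperty("groupsToJoin",
                                   System.getProperty("user.name") + "InstallVerifyGroup")
            };

            System.out.println("exportIpAddr=" + exportIpAddr
                               + " nic=" + mcastNic
                               + " exportPort=" + exportPort
                               + " groups=" + Arrays.toString(groupsToJoin));
        }
    }

With this parameterization, the interface can be selected at launch time, for example with -Ddefault.nic=eth1 -DexportPort=9090 (the property names come from the diffs above; the values shown are illustrative only).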
From: <tho...@us...> - 2010-07-29 19:19:01
|
Revision: 3358 http://bigdata.svn.sourceforge.net/bigdata/?rev=3358&view=rev Author: thompsonbry Date: 2010-07-29 19:18:51 +0000 (Thu, 29 Jul 2010) Log Message: ----------- cleaned up the licenses files a bit. Added Paths: ----------- trunk/lgpl-utils/LEGAL/lgpl-utils-license.txt Removed Paths: ------------- trunk/dsi-utils/LEGAL/LICENSE.txt trunk/lgpl-utils/LEGAL/LICENSE.txt Deleted: trunk/dsi-utils/LEGAL/LICENSE.txt =================================================================== --- trunk/dsi-utils/LEGAL/LICENSE.txt 2010-07-29 19:14:11 UTC (rev 3357) +++ trunk/dsi-utils/LEGAL/LICENSE.txt 2010-07-29 19:18:51 UTC (rev 3358) @@ -1,504 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 - - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. - - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations below. - - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. - - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. - - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. - - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. - - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. 
Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. - - Finally, software patents pose a constant threat to the existence of -any free program. We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. - - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. - - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. - - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. - - For example, on rare occasions, there may be a special need to -encourage the widest possible use of a certain library, so that it becomes -a de-facto standard. To achieve this, non-free programs must be -allowed to use the library. A more frequent case is that a free -library does the same job as widely used non-free libraries. In this -case, there is little to gain by limiting the free library to free -software only, so we use the Lesser General Public License. - - In other cases, permission to use a particular library in non-free -programs enables a greater number of people to use a large body of -free software. For example, permission to use the GNU C Library in -non-free programs enables many more people to use the whole GNU -operating system, as well as its variant, the GNU/Linux operating -system. - - Although the Lesser General Public License is Less protective of the -users' freedom, it does ensure that the user of a program that is -linked with the Library has the freedom and the wherewithal to run -that program using a modified version of the Library. - - The precise terms and conditions for copying, distribution and -modification follow. Pay close attention to the difference between a -"work based on the library" and a "work that uses the library". The -former contains code derived from the library, whereas the latter must -be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. 
This License Agreement applies to any software library or other -program which contains a notice placed by the copyright holder or -other authorized party saying it may be distributed under the terms of -this Lesser General Public License (also called "this License"). -Each licensee is addressed as "you". - - A "library" means a collection of software functions and/or data -prepared so as to be conveniently linked with application programs -(which use some of those functions and data) to form executables. - - The "Library", below, refers to any such software library or work -which has been distributed under these terms. A "work based on the -Library" means either the Library or any derivative work under -copyright law: that is to say, a work containing the Library or a -portion of it, either verbatim or with modifications and/or translated -straightforwardly into another language. (Hereinafter, translation is -included without limitation in the term "modification".) - - "Source code" for a work means the preferred form of the work for -making modifications to it. For a library, complete source code means -all the source code for all modules it contains, plus any associated -interface definition files, plus the scripts used to control compilation -and installation of the library. - - Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running a program using the Library is not restricted, and output from -such a program is covered only if its contents constitute a work based -on the Library (independent of the use of the Library in a tool for -writing it). Whether that is true depends on what the Library does -and what the program that uses the Library does. - - 1. You may copy and distribute verbatim copies of the Library's -complete source code as you receive it, in any medium, provided that -you conspicuously and appropriately publish on each copy an -appropriate copyright notice and disclaimer of warranty; keep intact -all the notices that refer to this License and to the absence of any -warranty; and distribute a copy of this License along with the -Library. - - You may charge a fee for the physical act of transferring a copy, -and you may at your option offer warranty protection in exchange for a -fee. - - 2. You may modify your copy or copies of the Library or any portion -of it, thus forming a work based on the Library, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) The modified work must itself be a software library. - - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. - - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. - - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. - - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. 
Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Library, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Library, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote -it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library -with the Library (or with a work based on the Library) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may opt to apply the terms of the ordinary GNU General Public -License instead of this License to a given copy of the Library. To do -this, you must alter all the notices that refer to this License, so -that they refer to the ordinary GNU General Public License, version 2, -instead of to this License. (If a newer version than version 2 of the -ordinary GNU General Public License has appeared, then you can specify -that version instead if you wish.) Do not make any other change in -these notices. - - Once this change is made in a given copy, it is irreversible for -that copy, so the ordinary GNU General Public License applies to all -subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of -the Library into a program that is not a library. - - 4. You may copy and distribute the Library (or a portion or -derivative of it, under Section 2) in object code or executable form -under the terms of Sections 1 and 2 above provided that you accompany -it with the complete corresponding machine-readable source code, which -must be distributed under the terms of Sections 1 and 2 above on a -medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the -source code from the same place satisfies the requirement to -distribute the source code, even though third parties are not -compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the -Library, but is designed to work with the Library by being compiled or -linked with it, is called a "work that uses the Library". Such a -work, in isolation, is not a derivative work of the Library, and -therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library -creates an executable that is a derivative of the Library (because it -contains portions of the Library), rather than a "work that uses the -library". The executable is therefore covered by this License. -Section 6 states terms for distribution of such executables. 
- - When a "work that uses the Library" uses material from a header file -that is part of the Library, the object code for the work may be a -derivative work of the Library even though the source code is not. -Whether this is true is especially significant if the work can be -linked without the Library, or if the work is itself a library. The -threshold for this to be true is not precisely defined by law. - - If such an object file uses only numerical parameters, data -structure layouts and accessors, and small macros and small inline -functions (ten lines or less in length), then the use of the object -file is unrestricted, regardless of whether it is legally a derivative -work. (Executables containing this object code plus portions of the -Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may -distribute the object code for the work under the terms of Section 6. -Any executables containing that work also fall under Section 6, -whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or -link a "work that uses the Library" with the Library to produce a -work containing portions of the Library, and distribute that work -under terms of your choice, provided that the terms permit -modification of the work for the customer's own use and reverse -engineering for debugging such modifications. - - You must give prominent notice with each copy of the work that the -Library is used in it and that the Library and its use are covered by -this License. You must supply a copy of this License. If the work -during execution displays copyright notices, you must include the -copyright notice for the Library among them, as well as a reference -directing the user to the copy of this License. Also, you must do one -of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. (It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at - least three years, to give the same user the materials - specified in Subsection 6a, above, for a charge no more - than the cost of performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. 
- - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the -Library" must include any data and utility programs needed for -reproducing the executable from it. However, as a special exception, -the materials to be distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies -the executable. - - It may happen that this requirement contradicts the license -restrictions of other proprietary libraries that do not normally -accompany the operating system. Such a contradiction means you cannot -use both them and the Library together in an executable that you -distribute. - - 7. You may place library facilities that are a work based on the -Library side-by-side in a single library together with other library -facilities not covered by this License, and distribute such a combined -library, provided that the separate distribution of the work based on -the Library and of the other library facilities is otherwise -permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. - - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. You may not copy, modify, sublicense, link with, or distribute -the Library except as expressly provided under this License. Any -attempt otherwise to copy, modify, sublicense, link with, or -distribute the Library is void, and will automatically terminate your -rights under this License. However, parties who have received copies, -or rights, from you under this License will not have their licenses -terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Library or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Library (or any work based on the -Library), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - - 10. Each time you redistribute the Library (or any work based on the -Library), the recipient automatically receives a license from the -original licensor to copy, distribute, link with or modify the Library -subject to these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties with -this License. - - 11. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Library at all. For example, if a patent -license would not permit royalty-free redistribution of the Library by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply, -and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 12. If the distribution and/or use of the Library is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Library under this License may add -an explicit geographical distribution limitation excluding those countries, -so that distribution is permitted only in or among countries not thus -excluded. In such case, this License incorporates the limitation as if -written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new -versions of the Lesser General Public License from time to time. -Such new versions will be similar in spirit to the present version, -but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library -specifies a version number of this License which applies to it and -"any later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Library does not specify a -license version number, you may choose any version ever published by -the Free Software Foundation. - - 14. If you wish to incorporate parts of the Library into other free -programs whose distribution conditions are incompatible with these, -write to the author to ask for permission. For software which is -copyrighted by the Free Software Foundation, write to the Free -Software Foundation; we sometimes make exceptions for this. Our -decision will be guided by the two goals of preserving the free status -of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE -LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU -FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR -CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE -LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF -SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Libraries - - If you develop a new library, and you want it to be of the greatest -possible use to the public, we recommend making it free software that -everyone can redistribute and change. You can do so by permitting -redistribution under these terms (or, alternatively, under the terms of the -ordinary General Public License). - - To apply these terms, attach the following notices to the library. It is -safest to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - <one line to give the library's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the library, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - library `Frob' (a library for tweaking knobs) written by James Random Hacker. - - <signature of Ty Coon>, 1 April 1990 - Ty Coon, President of Vice - -That's all there is to it! 
- - Deleted: trunk/lgpl-utils/LEGAL/LICENSE.txt =================================================================== --- trunk/lgpl-utils/LEGAL/LICENSE.txt 2010-07-29 19:14:11 UTC (rev 3357) +++ trunk/lgpl-utils/LEGAL/LICENSE.txt 2010-07-29 19:18:51 UTC (rev 3358) @@ -1,504 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 - - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. - - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations below. - - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. - - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. - - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. - - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. - - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. - - Finally, software patents pose a constant threat to the existence of -any free program. We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. 
Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. - - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. - - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. - - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. - - For example, on rare occasions, there may be a special need to -encourage the widest possible use of a certain library, so that it becomes -a de-facto standard. To achieve this, non-free programs must be -allowed to use the library. A more frequent case is that a free -library does the same job as widely used non-free libraries. In this -case, there is little to gain by limiting the free library to free -software only, so we use the Lesser General Public License. - - In other cases, permission to use a particular library in non-free -programs enables a greater number of people to use a large body of -free software. For example, permission to use the GNU C Library in -non-free programs enables many more people to use the whole GNU -operating system, as well as its variant, the GNU/Linux operating -system. - - Although the Lesser General Public License is Less protective of the -users' freedom, it does ensure that the user of a program that is -linked with the Library has the freedom and the wherewithal to run -that program using a modified version of the Library. - - The precise terms and conditions for copying, distribution and -modification follow. Pay close attention to the difference between a -"work based on the library" and a "work that uses the library". The -former contains code derived from the library, whereas the latter must -be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License Agreement applies to any software library or other -program which contains a notice placed by the copyright holder or -other authorized party saying it may be distributed under the terms of -this Lesser General Public License (also called "this License"). -Each licensee is addressed as "you". - - A "library" means a collection of software functions and/or data -prepared so as to be conveniently linked with application programs -(which use some of those functions and data) to form executables. - - The "Library", below, refers to any such software library or work -which has been distributed under these terms. 
A "work based on the -Library" means either the Library or any derivative work under -copyright law: that is to say, a work containing the Library or a -portion of it, either verbatim or with modifications and/or translated -straightforwardly into another language. (Hereinafter, translation is -included without limitation in the term "modification".) - - "Source code" for a work means the preferred form of the work for -making modifications to it. For a library, complete source code means -all the source code for all modules it contains, plus any associated -interface definition files, plus the scripts used to control compilation -and installation of the library. - - Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running a program using the Library is not restricted, and output from -such a program is covered only if its contents constitute a work based -on the Library (independent of the use of the Library in a tool for -writing it). Whether that is true depends on what the Library does -and what the program that uses the Library does. - - 1. You may copy and distribute verbatim copies of the Library's -complete source code as you receive it, in any medium, provided that -you conspicuously and appropriately publish on each copy an -appropriate copyright notice and disclaimer of warranty; keep intact -all the notices that refer to this License and to the absence of any -warranty; and distribute a copy of this License along with the -Library. - - You may charge a fee for the physical act of transferring a copy, -and you may at your option offer warranty protection in exchange for a -fee. - - 2. You may modify your copy or copies of the Library or any portion -of it, thus forming a work based on the Library, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) The modified work must itself be a software library. - - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. - - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. - - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. - - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Library, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. 
But when you -distribute the same sections as part of a whole which is a work based -on the Library, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote -it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library -with the Library (or with a work based on the Library) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may opt to apply the terms of the ordinary GNU General Public -License instead of this License to a given copy of the Library. To do -this, you must alter all the notices that refer to this License, so -that they refer to the ordinary GNU General Public License, version 2, -instead of to this License. (If a newer version than version 2 of the -ordinary GNU General Public License has appeared, then you can specify -that version instead if you wish.) Do not make any other change in -these notices. - - Once this change is made in a given copy, it is irreversible for -that copy, so the ordinary GNU General Public License applies to all -subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of -the Library into a program that is not a library. - - 4. You may copy and distribute the Library (or a portion or -derivative of it, under Section 2) in object code or executable form -under the terms of Sections 1 and 2 above provided that you accompany -it with the complete corresponding machine-readable source code, which -must be distributed under the terms of Sections 1 and 2 above on a -medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the -source code from the same place satisfies the requirement to -distribute the source code, even though third parties are not -compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the -Library, but is designed to work with the Library by being compiled or -linked with it, is called a "work that uses the Library". Such a -work, in isolation, is not a derivative work of the Library, and -therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library -creates an executable that is a derivative of the Library (because it -contains portions of the Library), rather than a "work that uses the -library". The executable is therefore covered by this License. -Section 6 states terms for distribution of such executables. - - When a "work that uses the Library" uses material from a header file -that is part of the Library, the object code for the work may be a -derivative work of the Library even though the source code is not. -Whether this is true is especially significant if the work can be -linked without the Library, or if the work is itself a library. The -threshold for this to be true is not precisely defined by law. 
- - If such an object file uses only numerical parameters, data -structure layouts and accessors, and small macros and small inline -functions (ten lines or less in length), then the use of the object -file is unrestricted, regardless of whether it is legally a derivative -work. (Executables containing this object code plus portions of the -Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may -distribute the object code for the work under the terms of Section 6. -Any executables containing that work also fall under Section 6, -whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or -link a "work that uses the Library" with the Library to produce a -work containing portions of the Library, and distribute that work -under terms of your choice, provided that the terms permit -modification of the work for the customer's own use and reverse -engineering for debugging such modifications. - - You must give prominent notice with each copy of the work that the -Library is used in it and that the Library and its use are covered by -this License. You must supply a copy of this License. If the work -during execution displays copyright notices, you must include the -copyright notice for the Library among them, as well as a reference -directing the user to the copy of this License. Also, you must do one -of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. (It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at - least three years, to give the same user the materials - specified in Subsection 6a, above, for a charge no more - than the cost of performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. - - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the -Library" must include any data and utility programs needed for -reproducing the executable from it. 
However, as a special exception, -the materials to be distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies -the executable. - - It may happen that this requirement contradicts the license -restrictions of other proprietary libraries that do not normally -accompany the operating system. Such a contradiction means you cannot -use both them and the Library together in an executable that you -distribute. - - 7. You may place library facilities that are a work based on the -Library side-by-side in a single library together with other library -facilities not covered by this License, and distribute such a combined -library, provided that the separate distribution of the work based on -the Library and of the other library facilities is otherwise -permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. - - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. You may not copy, modify, sublicense, link with, or distribute -the Library except as expressly provided under this License. Any -attempt otherwise to copy, modify, sublicense, link with, or -distribute the Library is void, and will automatically terminate your -rights under this License. However, parties who have received copies, -or rights, from you under this License will not have their licenses -terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Library or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Library (or any work based on the -Library), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - - 10. Each time you redistribute the Library (or any work based on the -Library), the recipient automatically receives a license from the -original licensor to copy, distribute, link with or modify the Library -subject to these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties with -this License. - - 11. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Library at all. 
For example, if a patent -license would not permit royalty-free redistribution of the Library by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply, -and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 12. If the distribution and/or use of the Library is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Library under this License may add -an explicit geographical distribution limitation excluding those countries, -so that distribution is permitted only in or among countries not thus -excluded. In such case, this License incorporates the limitation as if -written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new -versions of the Lesser General Public License from time to time. -Such new versions will be similar in spirit to the present version, -but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library -specifies a version number of this License which applies to it and -"any later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Library does not specify a -license version number, you may choose any version ever published by -the Free Software Foundation. - - 14. If you wish to incorporate parts of the Library into other free -programs whose distribution conditions are incompatible with these, -write to the author to ask for permission. For software which is -copyrighted by the Free Software Foundation, write to the Free -Software Foundation; we sometimes make exceptions for this. Our -decision will be guided by the two goals of preserving the free status -of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE -LIBRARY IS WITH YOU. 
SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU -FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR -CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE -LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF -SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Libraries - - If you develop a new library, and you want it to be of the greatest -possible use to the public, we recommend making it free software that -everyone can redistribute and change. You can do so by permitting -redistribution under these terms (or, alternatively, under the terms of the -ordinary General Public License). - - To apply these terms, attach the following notices to the library. It is -safest to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - <one line to give the library's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the library, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - library `Frob' (a library for tweaking knobs) written by James Random Hacker. - - <signature of Ty Coon>, 1 April 1990 - Ty Coon, President of Vice - -That's all there is to it! - - Copied: trunk/lgpl-utils/LEGAL/lgpl-utils-license.txt (from rev 3325, trunk/lgpl-utils/LEGAL/LICENSE.txt) =================================================================== --- trunk/lgpl-utils/LEGAL/lgpl-utils-license.txt (rev 0) +++ trunk/lgpl-utils/LEGAL/lgpl-utils-license.txt 2010-07-29 19:18:51 UTC (rev 3358) @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. 
It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. 
+ + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public Lice... [truncated message content] |
From: <tho...@us...> - 2010-08-01 18:53:27
Revision: 3382 http://bigdata.svn.sourceforge.net/bigdata/?rev=3382&view=rev Author: thompsonbry Date: 2010-08-01 18:53:21 +0000 (Sun, 01 Aug 2010) Log Message: ----------- Modified scale-out to use the WORMStrategy. Interned some class names in IndexMetadata which were being redundantly stored in the heap. Changed the index segment file naming convention to use enough digits (10) to represent an int32 index partition identifier. Fixed the DirectBufferPool statistics. Since these now represent a collection of pools, they have to be dynamically reattached in order to update correctly. Fixed the benchmark.txt queries for the DirectBufferPools. Restored the use of the parallel old generation GC mode to the bigdataCluster config files, but commented out the explicit assignment of a number of cores to be used for GC since the JVM default is the #of cores on the machine and this is otherwise machine specific. Turned down the BlockingBuffer logging level (to ERROR) since it was cluttering the detail.log file. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java trunk/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java trunk/bigdata/src/java/com/bigdata/io/WriteCache.java trunk/bigdata/src/java/com/bigdata/journal/AbstractJournal.java trunk/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java trunk/bigdata/src/java/com/bigdata/resources/ResourceEvents.java trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java trunk/bigdata/src/java/com/bigdata/service/DataService.java trunk/bigdata/src/java/com/bigdata/service/DefaultServiceFederationDelegate.java trunk/src/resources/analysis/queries/benchmark.txt trunk/src/resources/config/bigdataCluster.config trunk/src/resources/config/bigdataCluster16.config trunk/src/resources/config/log4j.properties Modified: trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2010-08-01 18:53:21 UTC (rev 3382) @@ -2049,10 +2049,14 @@ // Note: default assumes NOT an index partition. this.pmd = null; + /* Intern'd to reduce duplication on the heap. Will be com.bigdata.btree.BTree or + * com.bigdata.btree.IndexSegment and occasionally a class derived from BTree. + */ this.btreeClassName = getProperty(indexManager, properties, namespace, - Options.BTREE_CLASS_NAME, BTree.class.getName().toString()); + Options.BTREE_CLASS_NAME, BTree.class.getName()).intern(); - this.checkpointClassName = Checkpoint.class.getName(); + // Intern'd to reduce duplication on the heap. + this.checkpointClassName = Checkpoint.class.getName().intern(); // this.addrSer = AddressSerializer.INSTANCE; Modified: trunk/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java 2010-08-01 18:53:21 UTC (rev 3382) @@ -277,19 +277,19 @@ AbstractStatisticsCollector .addGarbageCollectorMXBeanCounters(serviceRoot .makePath(ICounterHierarchy.Memory_GarbageCollectors)); - - /* - * Add counters reporting on the various DirectBufferPools. - */ - { - // general purpose pool. 
- serviceRoot.makePath( - IProcessCounters.Memory + ICounterSet.pathSeparator - + "DirectBufferPool").attach( - DirectBufferPool.getCounters()); - - } + // Moved since counters must be dynamically reattached to reflect pool hierarchy. +// /* +// * Add counters reporting on the various DirectBufferPools. +// */ +// { +// +// serviceRoot.makePath( +// IProcessCounters.Memory + ICounterSet.pathSeparator +// + "DirectBufferPool").attach( +// DirectBufferPool.getCounters()); +// +// } if (LRUNexus.INSTANCE != null) { Modified: trunk/bigdata/src/java/com/bigdata/io/WriteCache.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/io/WriteCache.java 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/bigdata/src/java/com/bigdata/io/WriteCache.java 2010-08-01 18:53:21 UTC (rev 3382) @@ -51,7 +51,7 @@ import com.bigdata.counters.Instrument; import com.bigdata.journal.AbstractBufferStrategy; import com.bigdata.journal.DiskOnlyStrategy; -import com.bigdata.journal.DiskOnlyStrategy.StoreCounters; +//import com.bigdata.journal.DiskOnlyStrategy.StoreCounters; import com.bigdata.rawstore.Bytes; import com.bigdata.rawstore.IRawStore; import com.bigdata.rwstore.RWStore; Modified: trunk/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2010-08-01 18:53:21 UTC (rev 3382) @@ -1027,33 +1027,33 @@ } - case Disk: { +// case Disk: { +// +// /* +// * Setup the buffer strategy. +// */ +// +// fileMetadata = new FileMetadata(file, BufferMode.Disk, +// useDirectBuffers, initialExtent, maximumExtent, create, +// isEmptyFile, deleteOnExit, readOnly, forceWrites, +// offsetBits, //readCacheCapacity, readCacheMaxRecordSize, +// //readOnly ? null : writeCache, +// writeCacheEnabled, +// validateChecksum, +// createTime, checker, alternateRootBlock); +// +// _bufferStrategy = new DiskOnlyStrategy( +// 0L/* soft limit for maximumExtent */, +//// minimumExtension, +// fileMetadata); +// +// this._rootBlock = fileMetadata.rootBlock; +// +// break; +// +// } - /* - * Setup the buffer strategy. - */ - - fileMetadata = new FileMetadata(file, BufferMode.Disk, - useDirectBuffers, initialExtent, maximumExtent, create, - isEmptyFile, deleteOnExit, readOnly, forceWrites, - offsetBits, //readCacheCapacity, readCacheMaxRecordSize, - //readOnly ? 
null : writeCache, - writeCacheEnabled, - validateChecksum, - createTime, checker, alternateRootBlock); - - _bufferStrategy = new DiskOnlyStrategy( - 0L/* soft limit for maximumExtent */, -// minimumExtension, - fileMetadata); - - this._rootBlock = fileMetadata.rootBlock; - - break; - - } - -// case Disk: + case Disk: case DiskWORM: { /* Modified: trunk/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java 2010-08-01 18:53:21 UTC (rev 3382) @@ -46,6 +46,7 @@ import com.bigdata.io.DirectBufferPool; import com.bigdata.io.FileChannelUtility; import com.bigdata.io.IReopenChannel; +import com.bigdata.journal.WORMStrategy.StoreCounters; import com.bigdata.rawstore.Bytes; import com.bigdata.rawstore.IRawStore; import com.bigdata.resources.StoreManager.ManagedJournal; @@ -501,7 +502,7 @@ writeCache.flush(); - storeCounters.ncacheFlush++; +// storeCounters.ncacheFlush++; } @@ -544,551 +545,551 @@ } - /** - * Counters for {@link IRawStore} access, including operations that read or - * write through to the underlying media. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * - * @todo report elapsed time and average latency for force, reopen, and - * writeRootBlock. - * - * @todo counters need to be atomic if we want to avoid the possibility of - * concurrent <code>x++</code> operations failing to correctly - * increment <code>x</code> for each request. - */ - public static class StoreCounters { - - /** - * #of read requests. - */ - public long nreads; - - /** - * #of read requests that are satisfied by our write cache (vs the - * OS or disk level write cache). - */ - public long ncacheRead; - - /** - * #of read requests that read through to the backing file. - */ - public long ndiskRead; - - /** - * #of bytes read. - */ - public long bytesRead; - - /** - * #of bytes that have been read from the disk. - */ - public long bytesReadFromDisk; - - /** - * The size of the largest record read. - */ - public long maxReadSize; - - /** - * Total elapsed time for reads. - */ - public long elapsedReadNanos; - - /** - * Total elapsed time checking the disk write cache for records to be - * read. - */ - public long elapsedCacheReadNanos; - - /** - * Total elapsed time for reading on the disk. - */ - public long elapsedDiskReadNanos; - - /** - * #of write requests. - */ - public long nwrites; - - /** - * #of write requests that are absorbed by our write cache (vs the OS or - * disk level write cache). - */ - public long ncacheWrite; - - /** - * #of times the write cache was flushed to disk. - */ - public long ncacheFlush; - - /** - * #of write requests that write through to the backing file. - */ - public long ndiskWrite; - - /** - * The size of the largest record written. - */ - public long maxWriteSize; - - /** - * #of bytes written. - */ - public long bytesWritten; - - /** - * #of bytes that have been written on the disk. - */ - public long bytesWrittenOnDisk; - - /** - * Total elapsed time for writes. - */ - public long elapsedWriteNanos; - - /** - * Total elapsed time writing records into the cache (does not count - * time to flush the cache when it is full or to write records that do - * not fit in the cache directly to the disk). - */ - public long elapsedCacheWriteNanos; - - /** - * Total elapsed time for writing on the disk. 
- */ - public long elapsedDiskWriteNanos; - - /** - * #of times the data were forced to the disk. - */ - public long nforce; - - /** - * #of times the length of the file was changed (typically, extended). - */ - public long ntruncate; - - /** - * #of times the file has been reopened after it was closed by an - * interrupt. - */ - public long nreopen; - - /** - * #of times one of the root blocks has been written. - */ - public long nwriteRootBlock; - - /** - * Initialize a new set of counters. - */ - public StoreCounters() { - - } - - /** - * Copy ctor. - * @param o - */ - public StoreCounters(final StoreCounters o) { - - add( o ); - - } - - /** - * Adds counters to the current counters. - * - * @param o - */ - public void add(final StoreCounters o) { - - nreads += o.nreads; - ncacheRead += o.ncacheRead; - ndiskRead += o.ndiskRead; - bytesRead += o.bytesRead; - bytesReadFromDisk += o.bytesReadFromDisk; - maxReadSize += o.maxReadSize; - elapsedReadNanos += o.elapsedReadNanos; - elapsedCacheReadNanos += o.elapsedCacheReadNanos; - elapsedDiskReadNanos += o.elapsedDiskReadNanos; - - nwrites += o.nwrites; - ncacheWrite += o.ncacheWrite; - ncacheFlush += o.ncacheFlush; - ndiskWrite += o.ndiskWrite; - maxWriteSize += o.maxWriteSize; - bytesWritten += o.bytesWritten; - bytesWrittenOnDisk += o.bytesWrittenOnDisk; - elapsedWriteNanos += o.elapsedWriteNanos; - elapsedCacheWriteNanos += o.elapsedCacheWriteNanos; - elapsedDiskWriteNanos += o.elapsedDiskWriteNanos; - - nforce += o.nforce; - ntruncate += o.ntruncate; - nreopen += o.nreopen; - nwriteRootBlock += o.nwriteRootBlock; - - } - - /** - * Returns a new {@link StoreCounters} containing the current counter values - * minus the given counter values. - * - * @param o - * - * @return - */ - public StoreCounters subtract(final StoreCounters o) { - - // make a copy of the current counters. - final StoreCounters t = new StoreCounters(this); - - // subtract out the given counters. 
- t.nreads -= o.nreads; - t.ncacheRead -= o.ncacheRead; - t.ndiskRead -= o.ndiskRead; - t.bytesRead -= o.bytesRead; - t.bytesReadFromDisk -= o.bytesReadFromDisk; - t.maxReadSize -= o.maxReadSize; - t.elapsedReadNanos -= o.elapsedReadNanos; - t.elapsedCacheReadNanos -= o.elapsedCacheReadNanos; - t.elapsedDiskReadNanos -= o.elapsedDiskReadNanos; - - t.nwrites -= o.nwrites; - t.ncacheWrite -= o.ncacheWrite; - t.ncacheFlush -= o.ncacheFlush; - t.ndiskWrite -= o.ndiskWrite; - t.maxWriteSize -= o.maxWriteSize; - t.bytesWritten -= o.bytesWritten; - t.bytesWrittenOnDisk -= o.bytesWrittenOnDisk; - t.elapsedWriteNanos -= o.elapsedWriteNanos; - t.elapsedCacheWriteNanos -= o.elapsedCacheWriteNanos; - t.elapsedDiskWriteNanos -= o.elapsedDiskWriteNanos; - - t.nforce -= o.nforce; - t.ntruncate -= o.ntruncate; - t.nreopen -= o.nreopen; - t.nwriteRootBlock -= o.nwriteRootBlock; - - return t; - - } - - synchronized public CounterSet getCounters() { - - if (root == null) { - - root = new CounterSet(); - - // IRawStore API - { - - /* - * reads - */ - - root.addCounter("nreads", new Instrument<Long>() { - public void sample() { - setValue(nreads); - } - }); - - root.addCounter("bytesRead", new Instrument<Long>() { - public void sample() { - setValue(bytesRead); - } - }); - - root.addCounter("readSecs", new Instrument<Double>() { - public void sample() { - final double elapsedReadSecs = (elapsedReadNanos / 1000000000.); - setValue(elapsedReadSecs); - } - }); - - root.addCounter("bytesReadPerSec", - new Instrument<Double>() { - public void sample() { - final double readSecs = (elapsedReadNanos / 1000000000.); - final double bytesReadPerSec = (readSecs == 0L ? 0d - : (bytesRead / readSecs)); - setValue(bytesReadPerSec); - } - }); - - root.addCounter("maxReadSize", new Instrument<Long>() { - public void sample() { - setValue(maxReadSize); - } - }); - - /* - * writes - */ - - root.addCounter("nwrites", new Instrument<Long>() { - public void sample() { - setValue(nwrites); - } - }); - - root.addCounter("bytesWritten", new Instrument<Long>() { - public void sample() { - setValue(bytesWritten); - } - }); - - root.addCounter("writeSecs", new Instrument<Double>() { - public void sample() { - final double writeSecs = (elapsedWriteNanos / 1000000000.); - setValue(writeSecs); - } - }); - - root.addCounter("bytesWrittenPerSec", - new Instrument<Double>() { - public void sample() { - final double writeSecs = (elapsedWriteNanos / 1000000000.); - final double bytesWrittenPerSec = (writeSecs == 0L ? 0d - : (bytesWritten / writeSecs)); - setValue(bytesWrittenPerSec); - } - }); - - root.addCounter("maxWriteSize", new Instrument<Long>() { - public void sample() { - setValue(maxWriteSize); - } - }); - - } - - /* - * write cache statistics - */ - { - - final CounterSet writeCache = root.makePath("writeCache"); - - /* - * read - */ - writeCache.addCounter("nread", new Instrument<Long>() { - public void sample() { - setValue(ncacheRead); - } - }); - - writeCache.addCounter("readHitRate", new Instrument<Double>() { - public void sample() { - setValue(nreads == 0L ? 0d : (double) ncacheRead - / nreads); - } - }); - - writeCache.addCounter("readSecs", new Instrument<Double>() { - public void sample() { - setValue(elapsedCacheReadNanos / 1000000000.); - } - }); - - /* - * write - */ - - // #of writes on the write cache. - writeCache.addCounter("nwrite", new Instrument<Long>() { - public void sample() { - setValue(ncacheWrite); - } - }); - - /* - * % of writes that are buffered vs writing through to the - * disk. 
- * - * Note: This will be 1.0 unless you are writing large - * records. Large records are written directly to the disk - * rather than first into the write cache. When this happens - * the writeHitRate on the cache can be less than one. - */ - writeCache.addCounter("writeHitRate", new Instrument<Double>() { - public void sample() { - setValue(nwrites == 0L ? 0d : (double) ncacheWrite - / nwrites); - } - }); - - writeCache.addCounter("writeSecs", new Instrument<Double>() { - public void sample() { - setValue(elapsedCacheWriteNanos / 1000000000.); - } - }); - - // #of times the write cache was flushed to the disk. - writeCache.addCounter("nflush", new Instrument<Long>() { - public void sample() { - setValue(ncacheFlush); - } - }); - - } - - // disk statistics - { - final CounterSet disk = root.makePath("disk"); - - /* - * read - */ - - disk.addCounter("nreads", new Instrument<Long>() { - public void sample() { - setValue(ndiskRead); - } - }); - - disk.addCounter("bytesRead", new Instrument<Long>() { - public void sample() { - setValue(bytesReadFromDisk); - } - }); - - disk.addCounter("bytesPerRead", new Instrument<Double>() { - public void sample() { - final double bytesPerDiskRead = (ndiskRead == 0 ? 0d - : (bytesReadFromDisk / (double)ndiskRead)); - setValue(bytesPerDiskRead); - } - }); - - disk.addCounter("readSecs", new Instrument<Double>() { - public void sample() { - final double diskReadSecs = (elapsedDiskReadNanos / 1000000000.); - setValue(diskReadSecs); - } - }); - - disk.addCounter("bytesReadPerSec", - new Instrument<Double>() { - public void sample() { - final double diskReadSecs = (elapsedDiskReadNanos / 1000000000.); - final double bytesReadPerSec = (diskReadSecs == 0L ? 0d - : bytesReadFromDisk / diskReadSecs); - setValue(bytesReadPerSec); - } - }); - - disk.addCounter("secsPerRead", new Instrument<Double>() { - public void sample() { - final double diskReadSecs = (elapsedDiskReadNanos / 1000000000.); - final double readLatency = (diskReadSecs == 0 ? 0d - : diskReadSecs / ndiskRead); - setValue(readLatency); - } - }); - - /* - * write - */ - - disk.addCounter("nwrites", new Instrument<Long>() { - public void sample() { - setValue(ndiskWrite); - } - }); - - disk.addCounter("bytesWritten", new Instrument<Long>() { - public void sample() { - setValue(bytesWrittenOnDisk); - } - }); - - disk.addCounter("bytesPerWrite", new Instrument<Double>() { - public void sample() { - final double bytesPerDiskWrite = (ndiskWrite == 0 ? 0d - : (bytesWrittenOnDisk / (double)ndiskWrite)); - setValue(bytesPerDiskWrite); - } - }); - - disk.addCounter("writeSecs", new Instrument<Double>() { - public void sample() { - final double diskWriteSecs = (elapsedDiskWriteNanos / 1000000000.); - setValue(diskWriteSecs); - } - }); - - disk.addCounter("bytesWrittenPerSec", - new Instrument<Double>() { - public void sample() { - final double diskWriteSecs = (elapsedDiskWriteNanos / 1000000000.); - final double bytesWrittenPerSec = (diskWriteSecs == 0L ? 0d - : bytesWrittenOnDisk - / diskWriteSecs); - setValue(bytesWrittenPerSec); - } - }); - - disk.addCounter("secsPerWrite", new Instrument<Double>() { - public void sample() { - final double diskWriteSecs = (elapsedDiskWriteNanos / 1000000000.); - final double writeLatency = (diskWriteSecs == 0 ? 
0d - : diskWriteSecs / ndiskWrite); - setValue(writeLatency); - } - }); - - /* - * other - */ - - disk.addCounter("nforce", new Instrument<Long>() { - public void sample() { - setValue(nforce); - } - }); - - disk.addCounter("nextend", new Instrument<Long>() { - public void sample() { - setValue(ntruncate); - } - }); - - disk.addCounter("nreopen", new Instrument<Long>() { - public void sample() { - setValue(nreopen); - } - }); - - disk.addCounter("rootBlockWrites", new Instrument<Long>() { - public void sample() { - setValue(nwriteRootBlock); - } - }); - - } - - } - - return root; - - } - private CounterSet root; - - /** - * Human readable representation of the counters. - */ - public String toString() { - - return getCounters().toString(); - - } - - } +// /** +// * Counters for {@link IRawStore} access, including operations that read or +// * write through to the underlying media. +// * +// * @author <a href="mailto:tho...@us...">Bryan Thompson</a> +// * @version $Id$ +// * +// * @todo report elapsed time and average latency for force, reopen, and +// * writeRootBlock. +// * +// * @todo counters need to be atomic if we want to avoid the possibility of +// * concurrent <code>x++</code> operations failing to correctly +// * increment <code>x</code> for each request. +// */ +// public static class StoreCounters { +// +// /** +// * #of read requests. +// */ +// public long nreads; +// +// /** +// * #of read requests that are satisfied by our write cache (vs the +// * OS or disk level write cache). +// */ +// public long ncacheRead; +// +// /** +// * #of read requests that read through to the backing file. +// */ +// public long ndiskRead; +// +// /** +// * #of bytes read. +// */ +// public long bytesRead; +// +// /** +// * #of bytes that have been read from the disk. +// */ +// public long bytesReadFromDisk; +// +// /** +// * The size of the largest record read. +// */ +// public long maxReadSize; +// +// /** +// * Total elapsed time for reads. +// */ +// public long elapsedReadNanos; +// +// /** +// * Total elapsed time checking the disk write cache for records to be +// * read. +// */ +// public long elapsedCacheReadNanos; +// +// /** +// * Total elapsed time for reading on the disk. +// */ +// public long elapsedDiskReadNanos; +// +// /** +// * #of write requests. +// */ +// public long nwrites; +// +// /** +// * #of write requests that are absorbed by our write cache (vs the OS or +// * disk level write cache). +// */ +// public long ncacheWrite; +// +// /** +// * #of times the write cache was flushed to disk. +// */ +// public long ncacheFlush; +// +// /** +// * #of write requests that write through to the backing file. +// */ +// public long ndiskWrite; +// +// /** +// * The size of the largest record written. +// */ +// public long maxWriteSize; +// +// /** +// * #of bytes written. +// */ +// public long bytesWritten; +// +// /** +// * #of bytes that have been written on the disk. +// */ +// public long bytesWrittenOnDisk; +// +// /** +// * Total elapsed time for writes. +// */ +// public long elapsedWriteNanos; +// +// /** +// * Total elapsed time writing records into the cache (does not count +// * time to flush the cache when it is full or to write records that do +// * not fit in the cache directly to the disk). +// */ +// public long elapsedCacheWriteNanos; +// +// /** +// * Total elapsed time for writing on the disk. +// */ +// public long elapsedDiskWriteNanos; +// +// /** +// * #of times the data were forced to the disk. 
+// */ +// public long nforce; +// +// /** +// * #of times the length of the file was changed (typically, extended). +// */ +// public long ntruncate; +// +// /** +// * #of times the file has been reopened after it was closed by an +// * interrupt. +// */ +// public long nreopen; +// +// /** +// * #of times one of the root blocks has been written. +// */ +// public long nwriteRootBlock; +// +// /** +// * Initialize a new set of counters. +// */ +// public StoreCounters() { +// +// } +// +// /** +// * Copy ctor. +// * @param o +// */ +// public StoreCounters(final StoreCounters o) { +// +// add( o ); +// +// } +// +// /** +// * Adds counters to the current counters. +// * +// * @param o +// */ +// public void add(final StoreCounters o) { +// +// nreads += o.nreads; +// ncacheRead += o.ncacheRead; +// ndiskRead += o.ndiskRead; +// bytesRead += o.bytesRead; +// bytesReadFromDisk += o.bytesReadFromDisk; +// maxReadSize += o.maxReadSize; +// elapsedReadNanos += o.elapsedReadNanos; +// elapsedCacheReadNanos += o.elapsedCacheReadNanos; +// elapsedDiskReadNanos += o.elapsedDiskReadNanos; +// +// nwrites += o.nwrites; +// ncacheWrite += o.ncacheWrite; +// ncacheFlush += o.ncacheFlush; +// ndiskWrite += o.ndiskWrite; +// maxWriteSize += o.maxWriteSize; +// bytesWritten += o.bytesWritten; +// bytesWrittenOnDisk += o.bytesWrittenOnDisk; +// elapsedWriteNanos += o.elapsedWriteNanos; +// elapsedCacheWriteNanos += o.elapsedCacheWriteNanos; +// elapsedDiskWriteNanos += o.elapsedDiskWriteNanos; +// +// nforce += o.nforce; +// ntruncate += o.ntruncate; +// nreopen += o.nreopen; +// nwriteRootBlock += o.nwriteRootBlock; +// +// } +// +// /** +// * Returns a new {@link StoreCounters} containing the current counter values +// * minus the given counter values. +// * +// * @param o +// * +// * @return +// */ +// public StoreCounters subtract(final StoreCounters o) { +// +// // make a copy of the current counters. +// final StoreCounters t = new StoreCounters(this); +// +// // subtract out the given counters. 
+// t.nreads -= o.nreads; +// t.ncacheRead -= o.ncacheRead; +// t.ndiskRead -= o.ndiskRead; +// t.bytesRead -= o.bytesRead; +// t.bytesReadFromDisk -= o.bytesReadFromDisk; +// t.maxReadSize -= o.maxReadSize; +// t.elapsedReadNanos -= o.elapsedReadNanos; +// t.elapsedCacheReadNanos -= o.elapsedCacheReadNanos; +// t.elapsedDiskReadNanos -= o.elapsedDiskReadNanos; +// +// t.nwrites -= o.nwrites; +// t.ncacheWrite -= o.ncacheWrite; +// t.ncacheFlush -= o.ncacheFlush; +// t.ndiskWrite -= o.ndiskWrite; +// t.maxWriteSize -= o.maxWriteSize; +// t.bytesWritten -= o.bytesWritten; +// t.bytesWrittenOnDisk -= o.bytesWrittenOnDisk; +// t.elapsedWriteNanos -= o.elapsedWriteNanos; +// t.elapsedCacheWriteNanos -= o.elapsedCacheWriteNanos; +// t.elapsedDiskWriteNanos -= o.elapsedDiskWriteNanos; +// +// t.nforce -= o.nforce; +// t.ntruncate -= o.ntruncate; +// t.nreopen -= o.nreopen; +// t.nwriteRootBlock -= o.nwriteRootBlock; +// +// return t; +// +// } +// +// synchronized public CounterSet getCounters() { +// +// if (root == null) { +// +// root = new CounterSet(); +// +// // IRawStore API +// { +// +// /* +// * reads +// */ +// +// root.addCounter("nreads", new Instrument<Long>() { +// public void sample() { +// setValue(nreads); +// } +// }); +// +// root.addCounter("bytesRead", new Instrument<Long>() { +// public void sample() { +// setValue(bytesRead); +// } +// }); +// +// root.addCounter("readSecs", new Instrument<Double>() { +// public void sample() { +// final double elapsedReadSecs = (elapsedReadNanos / 1000000000.); +// setValue(elapsedReadSecs); +// } +// }); +// +// root.addCounter("bytesReadPerSec", +// new Instrument<Double>() { +// public void sample() { +// final double readSecs = (elapsedReadNanos / 1000000000.); +// final double bytesReadPerSec = (readSecs == 0L ? 0d +// : (bytesRead / readSecs)); +// setValue(bytesReadPerSec); +// } +// }); +// +// root.addCounter("maxReadSize", new Instrument<Long>() { +// public void sample() { +// setValue(maxReadSize); +// } +// }); +// +// /* +// * writes +// */ +// +// root.addCounter("nwrites", new Instrument<Long>() { +// public void sample() { +// setValue(nwrites); +// } +// }); +// +// root.addCounter("bytesWritten", new Instrument<Long>() { +// public void sample() { +// setValue(bytesWritten); +// } +// }); +// +// root.addCounter("writeSecs", new Instrument<Double>() { +// public void sample() { +// final double writeSecs = (elapsedWriteNanos / 1000000000.); +// setValue(writeSecs); +// } +// }); +// +// root.addCounter("bytesWrittenPerSec", +// new Instrument<Double>() { +// public void sample() { +// final double writeSecs = (elapsedWriteNanos / 1000000000.); +// final double bytesWrittenPerSec = (writeSecs == 0L ? 0d +// : (bytesWritten / writeSecs)); +// setValue(bytesWrittenPerSec); +// } +// }); +// +// root.addCounter("maxWriteSize", new Instrument<Long>() { +// public void sample() { +// setValue(maxWriteSize); +// } +// }); +// +// } +// +// /* +// * write cache statistics +// */ +// { +// +// final CounterSet writeCache = root.makePath("writeCache"); +// +// /* +// * read +// */ +// writeCache.addCounter("nread", new Instrument<Long>() { +// public void sample() { +// setValue(ncacheRead); +// } +// }); +// +// writeCache.addCounter("readHitRate", new Instrument<Double>() { +// public void sample() { +// setValue(nreads == 0L ? 
0d : (double) ncacheRead +// / nreads); +// } +// }); +// +// writeCache.addCounter("readSecs", new Instrument<Double>() { +// public void sample() { +// setValue(elapsedCacheReadNanos / 1000000000.); +// } +// }); +// +// /* +// * write +// */ +// +// // #of writes on the write cache. +// writeCache.addCounter("nwrite", new Instrument<Long>() { +// public void sample() { +// setValue(ncacheWrite); +// } +// }); +// +// /* +// * % of writes that are buffered vs writing through to the +// * disk. +// * +// * Note: This will be 1.0 unless you are writing large +// * records. Large records are written directly to the disk +// * rather than first into the write cache. When this happens +// * the writeHitRate on the cache can be less than one. +// */ +// writeCache.addCounter("writeHitRate", new Instrument<Double>() { +// public void sample() { +// setValue(nwrites == 0L ? 0d : (double) ncacheWrite +// / nwrites); +// } +// }); +// +// writeCache.addCounter("writeSecs", new Instrument<Double>() { +// public void sample() { +// setValue(elapsedCacheWriteNanos / 1000000000.); +// } +// }); +// +// // #of times the write cache was flushed to the disk. +// writeCache.addCounter("nflush", new Instrument<Long>() { +// public void sample() { +// setValue(ncacheFlush); +// } +// }); +// +// } +// +// // disk statistics +// { +// final CounterSet disk = root.makePath("disk"); +// +// /* +// * read +// */ +// +// disk.addCounter("nreads", new Instrument<Long>() { +// public void sample() { +// setValue(ndiskRead); +// } +// }); +// +// disk.addCounter("bytesRead", new Instrument<Long>() { +// public void sample() { +// setValue(bytesReadFromDisk); +// } +// }); +// +// disk.addCounter("bytesPerRead", new Instrument<Double>() { +// public void sample() { +// final double bytesPerDiskRead = (ndiskRead == 0 ? 0d +// : (bytesReadFromDisk / (double)ndiskRead)); +// setValue(bytesPerDiskRead); +// } +// }); +// +// disk.addCounter("readSecs", new Instrument<Double>() { +// public void sample() { +// final double diskReadSecs = (elapsedDiskReadNanos / 1000000000.); +// setValue(diskReadSecs); +// } +// }); +// +// disk.addCounter("bytesReadPerSec", +// new Instrument<Double>() { +// public void sample() { +// final double diskReadSecs = (elapsedDiskReadNanos / 1000000000.); +// final double bytesReadPerSec = (diskReadSecs == 0L ? 0d +// : bytesReadFromDisk / diskReadSecs); +// setValue(bytesReadPerSec); +// } +// }); +// +// disk.addCounter("secsPerRead", new Instrument<Double>() { +// public void sample() { +// final double diskReadSecs = (elapsedDiskReadNanos / 1000000000.); +// final double readLatency = (diskReadSecs == 0 ? 0d +// : diskReadSecs / ndiskRead); +// setValue(readLatency); +// } +// }); +// +// /* +// * write +// */ +// +// disk.addCounter("nwrites", new Instrument<Long>() { +// public void sample() { +// setValue(ndiskWrite); +// } +// }); +// +// disk.addCounter("bytesWritten", new Instrument<Long>() { +// public void sample() { +// setValue(bytesWrittenOnDisk); +// } +// }); +// +// disk.addCounter("bytesPerWrite", new Instrument<Double>() { +// public void sample() { +// final double bytesPerDiskWrite = (ndiskWrite == 0 ? 
0d +// : (bytesWrittenOnDisk / (double)ndiskWrite)); +// setValue(bytesPerDiskWrite); +// } +// }); +// +// disk.addCounter("writeSecs", new Instrument<Double>() { +// public void sample() { +// final double diskWriteSecs = (elapsedDiskWriteNanos / 1000000000.); +// setValue(diskWriteSecs); +// } +// }); +// +// disk.addCounter("bytesWrittenPerSec", +// new Instrument<Double>() { +// public void sample() { +// final double diskWriteSecs = (elapsedDiskWriteNanos / 1000000000.); +// final double bytesWrittenPerSec = (diskWriteSecs == 0L ? 0d +// : bytesWrittenOnDisk +// / diskWriteSecs); +// setValue(bytesWrittenPerSec); +// } +// }); +// +// disk.addCounter("secsPerWrite", new Instrument<Double>() { +// public void sample() { +// final double diskWriteSecs = (elapsedDiskWriteNanos / 1000000000.); +// final double writeLatency = (diskWriteSecs == 0 ? 0d +// : diskWriteSecs / ndiskWrite); +// setValue(writeLatency); +// } +// }); +// +// /* +// * other +// */ +// +// disk.addCounter("nforce", new Instrument<Long>() { +// public void sample() { +// setValue(nforce); +// } +// }); +// +// disk.addCounter("nextend", new Instrument<Long>() { +// public void sample() { +// setValue(ntruncate); +// } +// }); +// +// disk.addCounter("nreopen", new Instrument<Long>() { +// public void sample() { +// setValue(nreopen); +// } +// }); +// +// disk.addCounter("rootBlockWrites", new Instrument<Long>() { +// public void sample() { +// setValue(nwriteRootBlock); +// } +// }); +// +// } +// +// } +// +// return root; +// +// } +// private CounterSet root; +// +// /** +// * Human readable representation of the counters. +// */ +// public String toString() { +// +// return getCounters().toString(); +// +// } +// +// } // class StoreCounters /** * Performance counters for this class. @@ -1615,7 +1616,7 @@ */ storeCounters.nreads++; storeCounters.bytesRead+=nbytes; - storeCounters.ncacheRead++; +// storeCounters.ncacheRead++; storeCounters.elapsedReadNanos+=(System.nanoTime()-begin); // return the new buffer. @@ -1623,7 +1624,7 @@ } else { - storeCounters.elapsedCacheReadNanos+=(System.nanoTime()-beginCache); +// storeCounters.elapsedCacheReadNanos+=(System.nanoTime()-beginCache); } @@ -2109,10 +2110,10 @@ writeCache.write(addr, data); - storeCounters.ncacheWrite++; +// storeCounters.ncacheWrite++; +// +// storeCounters.elapsedCacheWriteNanos+=(System.nanoTime()-beginCache); - storeCounters.elapsedCacheWriteNanos+=(System.nanoTime()-beginCache); - } } else { Modified: trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java 2010-08-01 18:53:21 UTC (rev 3382) @@ -724,33 +724,36 @@ + ".movePercentCpuTimeThreshold"; String DEFAULT_MOVE_PERCENT_CPU_TIME_THRESHOLD = ".7"; - - /** - * The maximum #of optional compacting merge operations that will be - * performed during a single overflow event (default - * {@value #DEFAULT_OPTIONAL_COMPACTING_MERGES_PER_OVERFLOW}). - * <p> - * Once this #of optional compacting merge tasks have been identified - * for a given overflow event, the remainder of the index partitions - * that are neither split, joined, moved, nor copied will use - * incremental builds. An incremental build is generally cheaper since - * it only copies the data on the mutable {@link BTree} for the - * lastCommitTime rather than the fused view. 
A compacting merge permits - * the older index segments to be released and results in a simpler view - * with view {@link IndexSegment}s. Either a compacting merge or an - * incremental build will permit old journals to be released once the - * commit points on those journals are no longer required. - * <p> - * Note: Mandatory compacting merges are identified based on - * {@link #MAXIMUM_JOURNALS_PER_VIEW} and - * {@link #MAXIMUM_SEGMENTS_PER_VIEW}. There is NO limit the #of - * mandatory compacting merges that will be performed during an - * asynchronous overflow event. However, each mandatory compacting merge - * does count towards the maximum #of optional merges. Therefore if the - * #of mandatory compacting merges is greater than this parameter then - * NO optional compacting merges will be selected in a given overflow - * cycle. - */ + + /** + * The maximum #of optional compacting merge operations that will be + * performed during a single overflow event (default + * {@value #DEFAULT_OPTIONAL_COMPACTING_MERGES_PER_OVERFLOW}). + * <p> + * Once this #of optional compacting merge tasks have been identified + * for a given overflow event, the remainder of the index partitions + * that are neither split, joined, moved, nor copied will use + * incremental builds. An incremental build is generally cheaper since + * it only copies the data on the mutable {@link BTree} for the + * lastCommitTime rather than the fused view. A compacting merge permits + * the older index segments to be released and results in a simpler view + * with view {@link IndexSegment}s. Either a compacting merge or an + * incremental build will permit old journals to be released once the + * commit points on those journals are no longer required. + * <p> + * Note: Mandatory compacting merges are identified based on + * {@link #MAXIMUM_JOURNALS_PER_VIEW} and + * {@link #MAXIMUM_SEGMENTS_PER_VIEW}. There is NO limit the #of + * mandatory compacting merges that will be performed during an + * asynchronous overflow event. However, each mandatory compacting merge + * does count towards the maximum #of optional merges. Therefore if the + * #of mandatory compacting merges is greater than this parameter then + * NO optional compacting merges will be selected in a given overflow + * cycle. + * + * @deprecated merges are now performed in priority order while time + * remains in a given asynchronous overflow cycle. + */ String MAXIMUM_OPTIONAL_MERGES_PER_OVERFLOW = OverflowManager.class .getName() + ".maximumOptionalMergesPerOverflow"; Modified: trunk/bigdata/src/java/com/bigdata/resources/ResourceEvents.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/ResourceEvents.java 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/bigdata/src/java/com/bigdata/resources/ResourceEvents.java 2010-08-01 18:53:21 UTC (rev 3382) @@ -112,7 +112,8 @@ /** * Leading zeros without commas used to format the partition identifiers - * into index segment file names. + * into index segment file names. This uses 10 digits, which is enough + * to represent {@link Integer#MAX_VALUE}. 
*/ static NumberFormat leadingZeros; @@ -130,7 +131,7 @@ leadingZeros = NumberFormat.getIntegerInstance(); - leadingZeros.setMinimumIntegerDigits(5); + leadingZeros.setMinimumIntegerDigits(10); leadingZeros.setGroupingUsed(false); Modified: trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java 2010-08-01 18:53:21 UTC (rev 3382) @@ -85,8 +85,9 @@ import com.bigdata.journal.ITx; import com.bigdata.journal.Name2Addr; import com.bigdata.journal.TemporaryStore; +import com.bigdata.journal.WORMStrategy; import com.bigdata.journal.WriteExecutorService; -import com.bigdata.journal.DiskOnlyStrategy.StoreCounters; +import com.bigdata.journal.WORMStrategy.StoreCounters; import com.bigdata.mdi.IPartitionMetadata; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.mdi.IndexPartitionCause; @@ -2454,6 +2455,11 @@ ((DiskOnlyStrategy) getBufferStrategy()) .setStoreCounters(getStoreCounters()); + } else if (getBufferStrategy() instanceof WORMStrategy) { + + ((WORMStrategy) getBufferStrategy()) + .setStoreCounters(getStoreCounters()); + } } @@ -4556,7 +4562,7 @@ // make sure that directory exists. indexDir.mkdirs(); - final String partitionStr = (partitionId == -1 ? "" : "_part" + final String partitionStr = (partitionId == -1 ? "" : "_shardId" + leadingZeros.format(partitionId)); final String prefix = mungedName + "" + partitionStr + "_"; Modified: trunk/bigdata/src/java/com/bigdata/service/DataService.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/DataService.java 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/bigdata/src/java/com/bigdata/service/DataService.java 2010-08-01 18:53:21 UTC (rev 3382) @@ -413,6 +413,7 @@ * reattaching the counters for the live {@link ManagedJournal} during * synchronous overflow. */ + @Override synchronized public void reattachDynamicCounters() { final long now = System.currentTimeMillis(); @@ -422,6 +423,9 @@ if (service.isOpen() && service.resourceManager.isRunning() && elapsed > 5000/* ms */) { + // inherit base class behavior + super.reattachDynamicCounters(); + // The service's counter set hierarchy. final CounterSet serviceRoot = service.getFederation() .getServiceCounterSet(); Modified: trunk/bigdata/src/java/com/bigdata/service/DefaultServiceFederationDelegate.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/DefaultServiceFederationDelegate.java 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/bigdata/src/java/com/bigdata/service/DefaultServiceFederationDelegate.java 2010-08-01 18:53:21 UTC (rev 3382) @@ -41,7 +41,11 @@ import org.apache.log4j.Logger; import com.bigdata.counters.CounterSet; +import com.bigdata.counters.ICounterSet; +import com.bigdata.counters.IProcessCounters; import com.bigdata.counters.httpd.CounterSetHTTPD; +import com.bigdata.io.DirectBufferPool; +import com.bigdata.journal.ConcurrencyManager.IConcurrencyManagerCounters; import com.bigdata.util.httpd.AbstractHTTPD; /** @@ -92,9 +96,31 @@ } - /** NOP */ - public void reattachDynamicCounters() { + /** Reattaches the {@link DirectBufferPool} counters. */ + public void reattachDynamicCounters() { + // The service's counter set hierarchy. 
+ final CounterSet serviceRoot = service.getFederation() + .getServiceCounterSet(); + + // Ensure path exists. + final CounterSet tmp = serviceRoot.makePath(IProcessCounters.Memory); + + /* + * Add counters reporting on the various DirectBufferPools. + */ + synchronized (tmp) { + + // detach the old counters (if any). + tmp.detach("DirectBufferPool"); + + // attach the current counters. + tmp.makePath("DirectBufferPool").attach( + DirectBufferPool.getCounters()); + + } + + } /** Modified: trunk/src/resources/analysis/queries/benchmark.txt =================================================================== --- trunk/src/resources/analysis/queries/benchmark.txt 2010-07-31 00:52:15 UTC (rev 3381) +++ trunk/src/resources/analysis/queries/benchmark.txt 2010-08-01 18:53:21 UTC (rev 3382) @@ -75,7 +75,8 @@ http://localhost:8080/?regex=/([^/]*)/.*Memory/Bytes%20Free&correlated=true&depth=3&file=memory/BytesFree http://localhost:8080/?regex=/([^/]*)/.*Memory/Swap%20Bytes%20Used&correlated=true&depth=3&period=Minutes&file=memory/SwapBytesUsed http://localhost:8080/?regex=/([^/]*)/.*Memory/Major%20Page%20Faults%20Per%20Second&correlated=true&depth=3&period=Minutes&file=memory/MajorPageFaultsPerSecond -http://localhost:8080/?regex=/([^/]*)/.*IDataService/.*/Memory/DirectBufferPool/poolSize&correlated=true&depth=12&file=memory/directBufferPool/poolSize +http://localhost:8080/?regex=/([^/]*)/.*IDataService/.*/Memory/DirectBufferPool/totalBytesUsed&correlated=true&depth=12&file=memory/directBufferPool/totalBytesUsed +http://localhost:8080/?regex=/([^/]*)/.*IDataService/.*/Memory/DirectBufferPool/(.*)/poolSize&correlated=true&depth=12&file=memory/directBufferPool/poolSize http://localhost:8080/?regex=/([^/]*)/.*(IDataService|IClientService)/.*/Memory/Virtual%20Size&correlated=true&depth=12&file=memory/VirtualSize http://localhost:8080/?regex=/... [truncated message content] |
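The index segment naming change in this revision (ResourceEvents now pads partition identifiers to ten digits and StoreManager switches to a "_shardId" prefix) can be reproduced with plain java.text.NumberFormat. The class below is only an illustrative sketch with hypothetical names, not the actual bigdata code:

    import java.text.NumberFormat;

    /*
     * Illustrative sketch only: shows why ten zero-padded digits are enough
     * for any int32 partition identifier and how such an identifier can be
     * embedded in an index segment file name.
     */
    public class ShardFileNameDemo {

        static NumberFormat leadingZeros() {
            final NumberFormat f = NumberFormat.getIntegerInstance();
            f.setMinimumIntegerDigits(10); // Integer.MAX_VALUE = 2147483647 has 10 digits
            f.setGroupingUsed(false);      // no "2,147,483,647" style separators
            return f;
        }

        public static void main(final String[] args) {
            final NumberFormat f = leadingZeros();
            final int partitionId = 42; // hypothetical partition identifier
            // partitionId == -1 conventionally means "not an index partition".
            final String partitionStr = (partitionId == -1 ? "" : "_shardId"
                    + f.format(partitionId));
            System.out.println("myIndex" + partitionStr + "_"); // myIndex_shardId0000000042_
            System.out.println(f.format(Integer.MAX_VALUE));    // 2147483647
        }
    }

Ten digits are sufficient because Integer.MAX_VALUE (2147483647) is itself ten digits long, and fixed-width zero padding keeps non-negative identifiers in numeric order when the resulting file names are sorted lexically.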
From: <tho...@us...> - 2010-08-03 17:12:04
Revision: 3402 http://bigdata.svn.sourceforge.net/bigdata/?rev=3402&view=rev Author: thompsonbry Date: 2010-08-03 17:11:58 +0000 (Tue, 03 Aug 2010) Log Message: ----------- Modified NanoSparqlServer to include the query in its log messages (server side). Modified BigdataSail to fix a problem where it was attempting to resolve a language code in a langmatch filter against the KB. Modified Term2IdWriteTask to avoid an NPE when it returns a null IV object in the readOnly mode. Modified BigdataEvaluationStrategy to fix a problem where a Sesame Filter object was being serialized with a rule (https://sourceforge.net/apps/trac/bigdata/ticket/135). Modified Paths: -------------- trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteProc.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteTask.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl2.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteProc.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteProc.java 2010-08-03 15:13:17 UTC (rev 3401) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteProc.java 2010-08-03 17:11:58 UTC (rev 3402) @@ -258,7 +258,7 @@ * For each term whose serialized key is mapped to the current index * partition, lookup the term in the <em>terms</em> index. If it is there * then note its assigned termId. Otherwise, use the partition local counter - * to assign the term identifier, note the term identifer so that it can be + * to assign the term identifier, note the term identifier so that it can be * communicated back to the client, and insert the {term,termId} entry into * the <em>terms</em> index. * @@ -288,8 +288,8 @@ // used to serialize term identifiers. final DataOutputBuffer idbuf = new DataOutputBuffer(); - final TermIdEncoder encoder = new TermIdEncoder(//scaleOutTermIds, - scaleOutTermIdBitsToReverse); + final TermIdEncoder encoder = readOnly ? null : new TermIdEncoder( + scaleOutTermIdBitsToReverse); // #of new terms (#of writes on the index). int nnew = 0; @@ -532,7 +532,7 @@ } - final public static VTE VTE(byte code) { + final public static VTE VTE(final byte code) { switch(code) { case ITermIndexCodes.TERM_CODE_URI: @@ -574,7 +574,7 @@ } - public Result(IV[] ivs) { + public Result(final IV[] ivs) { assert ivs != null; @@ -618,15 +618,7 @@ LongPacker.packLong(out,n); for (int i = 0; i < n; i++) { - - /* - * This is the implementation for backwards - * compatibility. We should not see inline values here. 
- */ - if (ivs[i].isInline()) { - throw new RuntimeException(); - } - + // LongPacker.packLong(out, ids[i]); out.writeObject(ivs[i]); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteTask.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteTask.java 2010-08-03 15:13:17 UTC (rev 3401) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteTask.java 2010-08-03 17:11:58 UTC (rev 3402) @@ -198,7 +198,7 @@ } - AbstractKeyArrayIndexProcedureConstructor ctor = + final AbstractKeyArrayIndexProcedureConstructor ctor = new Term2IdWriteProcConstructor( readOnly, r.storeBlankNodes, r.termIdBitsToReverse); Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl2.java =================================================================== --- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl2.java 2010-08-03 15:13:17 UTC (rev 3401) +++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl2.java 2010-08-03 17:11:58 UTC (rev 3402) @@ -1640,7 +1640,23 @@ final IJoinNexus joinNexus = joinNexusFactory.newInstance(database .getIndexManager()); - itr1 = joinNexus.runQuery(step); + +// itr1 = joinNexus.runQuery(step); + + if (step instanceof ProxyRuleWithSesameFilters) { + + /* + * Note: Do not send the proxy rule down the wire. It has Sesame + * Filter objects which are not Serializable. + */ + itr1 = joinNexus.runQuery(((ProxyRuleWithSesameFilters) step) + .getProxyRule()); + + } else { + + itr1 = joinNexus.runQuery(step); + + } } catch (Exception ex) { throw new QueryEvaluationException(ex); Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2010-08-03 15:13:17 UTC (rev 3401) +++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2010-08-03 17:11:58 UTC (rev 3402) @@ -87,6 +87,7 @@ import org.openrdf.query.BindingSet; import org.openrdf.query.Dataset; import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.algebra.LangMatches; import org.openrdf.query.algebra.QueryRoot; import org.openrdf.query.algebra.StatementPattern; import org.openrdf.query.algebra.TupleExpr; @@ -3100,6 +3101,22 @@ @Override public void meet(final ValueConstant constant) { + if (constant.getParentNode() instanceof LangMatches) { + /* Don't try to resolve for lang matches. + * + * Note: Sesame will sometimes use a Literal to represent + * a constant parameter to a function, such as LangMatches. + * For such uses, we DO NOT want to attempt to resolve the + * Literal against the lexicon. Instead, it should just be + * passed through. BigdataSailEvaluationStrategy is then + * responsible for recognizing cases where the lack of an + * IV on a constant is associated with such function calls + * rather than indicating that the Value is not known to + * the KB. + */ + return; + } + final Value val = constant.getValue(); // add BigdataValue variant of the var's Value. @@ -3186,7 +3203,14 @@ @Override public void meet(ValueConstant constant) { - // the Sesame Value object. + if (constant.getParentNode() instanceof LangMatches) { + /* Note: This is parallel to the meet in the visit + * pattern above. + */ + return; + } + + // the Sesame Value object. final Value val = constant.getValue(); // Lookup the resolve BigdataValue object. 
Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java =================================================================== --- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 2010-08-03 15:13:17 UTC (rev 3401) +++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 2010-08-03 17:11:58 UTC (rev 3402) @@ -1021,7 +1021,14 @@ try { queries.put(queryId, new RunningQuery(queryId.longValue(), queryStr, begin)); - doQuery(cxn, os); + try { + doQuery(cxn, os); + } catch(Throwable t) { + /* + * Log the query and the exception together. + */ + log.error(t.getLocalizedMessage() + ":\n" + queryStr, t); + } os.flush(); return null; } catch (Throwable t) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
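The Term2IdWriteProc/Term2IdWriteTask portion of this revision only constructs a TermIdEncoder when identifiers can actually be assigned; in readOnly mode an unknown term resolves to a null IV, which callers must now tolerate. The toy below is a minimal sketch of that contract. All names and data structures here are simplified stand-ins, not the committed code, which operates against the terms B+Tree of an index partition.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicLong;

    final class TermResolutionSketch {

        private final Map<String, Long> termsIndex = new HashMap<String, Long>();

        private final AtomicLong partitionLocalCounter = new AtomicLong();

        /**
         * In read-write mode an unknown term is assigned the next
         * partition-local identifier and recorded; in read-only mode the
         * caller gets a null and must not assume an identifier exists.
         */
        Long resolve(final String term, final boolean readOnly) {

            final Long tid = termsIndex.get(term);

            if (tid != null)
                return tid; // already known.

            if (readOnly)
                return null; // unknown term: nothing is assigned or written.

            final long assigned = partitionLocalCounter.incrementAndGet();

            termsIndex.put(term, assigned); // insert the {term, termId} entry.

            return assigned;

        }

    }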
From: <tho...@us...> - 2010-08-05 16:29:52
|
Revision: 3414 http://bigdata.svn.sourceforge.net/bigdata/?rev=3414&view=rev Author: thompsonbry Date: 2010-08-05 16:29:44 +0000 (Thu, 05 Aug 2010) Log Message: ----------- Modified WriteExecutorService, DataService, AbstractScaleOutFederation, and OverflowManager to introduce the ability to force overflow even if no shard builds are necessary. This was done in order to support benchmarking against compact shards. Bug fix to AbstractTransactionService#notifyCommit(long). It was advancing the releaseTime without regard for the minimumReleaseAge if there were no transactions running. Fixes for 3 unit tests in com.bigdata.resources which were failing due to (a) the bug fixed above in the AbstractTransactionService; and (b) the historical change to the OverflowManager to purge (aka delete) resources during synchronous overflow. Fixed TestDistributedTransactionService. The tests in this suite were failing because notifyCommit(long) had at somepoint been extended to advance the release time. The mock transaction service in the unit test now explicitly overrides a method on the transaction service such that bare commits do not advance the release time. Bug fix in IndexManager where a NPE could be thrown if the previous journal had been deleted before calling listPartitions(long) with the timestamp of the lastCommitTime on that journal. Added javadoc to TestAsynchronousStatementBufferFactory#test_loadAndVerify_U1() to the effect that the unit test can hang when running the entire com.bigdata.rdf test suite with JVMs up to and including 1.6.0_17 unless you specify -XX:UseMembar as a workaround for a JVM bug. See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6822370. com.bigdata.journal.TestTransactionService#test_newTx_readOnly() now fails occasionally. 
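The AbstractTransactionService fix mentioned above comes down to how the release time is computed with and without running transactions; the updated javadoc for Options.MIN_RELEASE_AGE in the diff below gives the two formulas. The helper below merely restates them for clarity (an illustrative sketch, not the committed code): timestamps are plain longs and earliestTxStartTime is null when no transactions are running.

    /**
     * Illustrative restatement of the release time rules (not the committed
     * code).
     */
    static long newReleaseTime(final long lastCommitTime,
            final Long earliestTxStartTime, final long minReleaseAge,
            final long now) {

        if (earliestTxStartTime == null) {

            // Bare commit, no running tx: advance, but honor minReleaseAge.
            return Math.min(lastCommitTime - 1, now - minReleaseAge);

        }

        // Running tx: never release history the earliest tx can still read.
        return Math.min(lastCommitTime - 1, Math.min(earliestTxStartTime
                .longValue(), now - minReleaseAge));

    }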
Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java trunk/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java trunk/bigdata/src/java/com/bigdata/resources/IndexManager.java trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java trunk/bigdata/src/java/com/bigdata/service/AbstractScaleOutFederation.java trunk/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java trunk/bigdata/src/java/com/bigdata/service/DataService.java trunk/bigdata/src/java/com/bigdata/service/DistributedTransactionService.java trunk/bigdata/src/resources/logging/log4j.properties trunk/bigdata/src/test/com/bigdata/journal/TestTransactionService.java trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java trunk/bigdata/src/test/com/bigdata/resources/TestReleaseResources.java trunk/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java trunk/bigdata/src/test/com/bigdata/service/TestDistributedTransactionServiceRestart.java trunk/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/TransactionServer.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/master/TaskMaster.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.config trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java trunk/src/resources/config/bigdataCluster.config Added Paths: ----------- trunk/src/resources/scripts/dumpFed.sh Property Changed: ---------------- trunk/ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ trunk/src/resources/config/ Property changes on: trunk ___________________________________________________________________ Modified: svn:ignore - ant-build src bin bigdata*.jar ant-release standalone test* countersfinal.xml events.jnl .settings *.jnl TestInsertRate.out SYSTAP-BBT-result.txt U10load+query *.hprof com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv commit-log.txt eventLog dist bigdata-test com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv + ant-build src bin bigdata*.jar ant-release standalone test* countersfinal.xml events.jnl .settings *.jnl TestInsertRate.out SYSTAP-BBT-result.txt U10load+query *.hprof com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv commit-log.txt eventLog dist bigdata-test com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv DIST.*.tgz REL.*.tgz Modified: trunk/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -36,6 +36,7 @@ import org.apache.log4j.Logger; import com.bigdata.btree.IndexSegment.ImmutableNodeFactory.ImmutableLeaf; +import com.bigdata.io.DirectBufferPool; import com.bigdata.journal.DumpJournal; import com.bigdata.rawstore.IRawStore; @@ -154,6 +155,16 @@ } + // multi-block scan of the index segment. + boolean multiBlockScan = false; // @todo command line option. + if (multiBlockScan) { + + writeBanner("dump leaves using multi-block forward scan"); + + dumpLeavesMultiBlockForwardScan(store); + + } + // dump the leaves using a fast reverse scan. 
boolean fastReverseScan = true;// @todo command line option if (fastReverseScan) { @@ -524,6 +535,36 @@ } + /** + * Dump leaves using the {@link IndexSegmentMultiBlockIterator}. + * + * @param store + */ + static void dumpLeavesMultiBlockForwardScan(final IndexSegmentStore store) { + + final long begin = System.currentTimeMillis(); + + final IndexSegment seg = store.loadIndexSegment(); + + final ITupleIterator<?> itr = new IndexSegmentMultiBlockIterator(seg, DirectBufferPool.INSTANCE, + null/* fromKey */, null/* toKey */, IRangeQuery.DEFAULT/* flags */); + + int nscanned = 0; + + while(itr.hasNext()) { + + itr.next(); + + nscanned++; + + } + + final long elapsed = System.currentTimeMillis() - begin; + + System.out.println("Visited "+nscanned+" tuples using multi-block forward scan in "+elapsed+" ms"); + + } + static void writeBanner(String s) { System.out.println(bar); Modified: trunk/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -1752,11 +1752,11 @@ // // } - /** - * Flag may be set to force overflow processing during the next group - * commit. The flag is cleared once an overflow has occurred. - */ - public final AtomicBoolean forceOverflow = new AtomicBoolean(false); +// /** +// * Flag may be set to force overflow processing during the next group +// * commit. The flag is cleared once an overflow has occurred. +// */ +// public final AtomicBoolean forceOverflow = new AtomicBoolean(false); /** * Return <code>true</code> if the pre-conditions for overflow processing @@ -1765,7 +1765,8 @@ private boolean isShouldOverflow() { return resourceManager.isOverflowEnabled() - && (forceOverflow.get() || resourceManager.shouldOverflow()); +// && (forceOverflow.get() || resourceManager.shouldOverflow()); + && resourceManager.shouldOverflow(); } @@ -1815,10 +1816,10 @@ log.error("Overflow error: "+serviceName+" : "+t, t); - } finally { - - // clear force flag. - forceOverflow.set(false); +// } finally { +// +// // clear force flag. +// forceOverflow.set(false); } Modified: trunk/bigdata/src/java/com/bigdata/resources/IndexManager.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/IndexManager.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/java/com/bigdata/resources/IndexManager.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -1684,16 +1684,28 @@ final StringBuilder sb = new StringBuilder(); final AbstractJournal journal = getJournal(timestamp); + + if (journal == null) { + /* + * This condition can occur if there are no shard views on the + * previous journal and the releaseAge is zero since the previous + * journal can be purged (deleted) before this method is invoked. + * This situation arises in a few of the unit tests which begin with + * an empty journal and copy everything onto the new journal such + * that the old journal can be immediately released. + */ + return "No journal: timestamp=" + timestamp; + } sb.append("timestamp="+timestamp+"\njournal="+journal.getResourceMetadata()); // historical view of Name2Addr as of that timestamp. 
- final ITupleIterator itr = journal.getName2Addr(timestamp) + final ITupleIterator<?> itr = journal.getName2Addr(timestamp) .rangeIterator(); while (itr.hasNext()) { - final ITuple tuple = itr.next(); + final ITuple<?> tuple = itr.next(); final Entry entry = EntrySerializer.INSTANCE .deserialize(new DataInputBuffer(tuple.getValue())); Modified: trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -287,6 +287,14 @@ */ protected final AtomicBoolean asyncOverflowEnabled = new AtomicBoolean(true); + /** + * Flag may be set to force overflow processing during the next group + * commit. The flag is cleared by {@link #overflow()}. + * + * @see DataService#forceOverflow(boolean, boolean) + */ + public final AtomicBoolean forceOverflow = new AtomicBoolean(false); + /** * A flag that may be set to force the next asynchronous overflow to perform * a compacting merge for all indices that are not simply copied over to the @@ -295,6 +303,8 @@ * made compact and SHOULD NOT be used for deployed federations</strong>). * The state of the flag is cleared each time asynchronous overflow * processing begins. + * + * @see DataService#forceOverflow(boolean, boolean) */ public final AtomicBoolean compactingMerge = new AtomicBoolean(false); @@ -1849,6 +1859,19 @@ */ public boolean shouldOverflow() { + if(forceOverflow.get()) { + + /* + * Note: forceOverflow trumps everything else. + */ + + if (log.isInfoEnabled()) + log.info("Forcing overflow."); + + return true; + + } + if (isTransient()) { /* @@ -1886,7 +1909,7 @@ return false; } - + /* * Look for overflow condition on the "live" journal. */ @@ -1959,8 +1982,18 @@ */ public Future<Object> overflow() { - assert overflowAllowed.get(); +// assert overflowAllowed.get(); + /* + * Atomically test and clear the flag. The local boolean is inspected + * below. When true, asynchronous overflow processing will occur unless + * an error occurs during synchronous overflow processing. This ensures + * that we can force a compacting merge on the shards of a data service + * even if that data service has not buffer sufficient writes to warrant + * a build on any of the index segments. + */ + final boolean forceOverflow = this.forceOverflow.getAndSet(false/* newValue */); + final Event e = new Event(getFederation(), new EventResource(), EventType.SynchronousOverflow).addDetail( "synchronousOverflowCounter", @@ -1982,7 +2015,12 @@ if (asyncOverflowEnabled.get()) { - if (overflowMetadata.postProcess) { + /* + * Do overflow processing if overflow is being forced OR if we + * need to do a build for at least one index partition. + */ + + if (forceOverflow || overflowMetadata.postProcess) { /* * Post-processing SHOULD be performed. 
Modified: trunk/bigdata/src/java/com/bigdata/service/AbstractScaleOutFederation.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/AbstractScaleOutFederation.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/java/com/bigdata/service/AbstractScaleOutFederation.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -47,6 +47,7 @@ import com.bigdata.btree.IRangeQuery; import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleIterator; +import com.bigdata.btree.IndexSegment; import com.bigdata.journal.ITransactionService; import com.bigdata.journal.ITx; import com.bigdata.mdi.IMetadataIndex; @@ -492,25 +493,42 @@ } - /** - * Force overflow of each data service in the scale-out federation (only - * scale-out federations support overflow processing). This method is - * synchronous. It will not return until all {@link DataService}s have - * initiated and completed overflow processing. Any unused resources (as - * determined by the {@link StoreManager}) will have been purged. - * - * @param truncateJournal - * When <code>true</code>, the live journal will be truncated - * to its minimum extent (all writes will be preserved but there - * will be no free space left in the journal). This may be used - * to force the {@link DataService} to its minimum possible - * footprint. - * - * @todo when overflow processing is enabled for the {@link MetadataService} - * we will have to modify this to also trigger overflow for those - * services. - */ - public void forceOverflow(final boolean truncateJournal) { + /** + * Force overflow of each data service in the scale-out federation (only + * scale-out federations support overflow processing). This method is + * synchronous. It will not return until all {@link DataService}s have + * initiated and completed overflow processing. Any unused resources (as + * determined by the {@link StoreManager}) will have been purged. + * <p> + * This is a relatively fast operation when + * <code>compactingMerge := false</code>. By specifying both + * <code>compactingMerge := false</code> and + * <code>truncateJournal := false</code> you can cause the data services to + * close out their current journals against further writes. While this is + * not a global synchronous operation, it can provide a basis to obtain a + * "near synchronous" snapshot from the federation consisting of all writes + * up to the point where overflow was triggered on each data service. + * + * @param compactingMerge + * When <code>true</code>, each shard on each + * {@link IDataService} will undergo a compacting merge. + * Synchronous parallel compacting merge of all shards is an + * expensive operation. This parameter shoudl normally be + * <code>false</code> unless you are requesting a compacting + * merge for specific purposes, such as benchmarking when all + * data is known to exist in one {@link IndexSegment} per shard. + * @param truncateJournal + * When <code>true</code>, the live journal will be truncated to + * its minimum extent (all writes will be preserved but there + * will be no free space left in the journal). This may be used + * to force the {@link DataService} to its minimum possible + * footprint. + * + * @todo when overflow processing is enabled for the {@link MetadataService} + * we will have to modify this to also trigger overflow for those + * services. + */ + public void forceOverflow(final boolean compactingMerge, final boolean truncateJournal) { // find UUID for each data service. 
final UUID[] dataServiceUUIDs = getDataServiceUUIDs(0/* maxCount */); @@ -524,7 +542,7 @@ for (UUID serviceUUID : dataServiceUUIDs) { tasks.add(new ForceOverflowTask(getDataService(serviceUUID), - truncateJournal)); + compactingMerge, truncateJournal)); } @@ -641,16 +659,19 @@ .getLogger(ForceOverflowTask.class); private final IDataService dataService; + private final boolean compactingMerge; private final boolean truncateJournal; - public ForceOverflowTask(final IDataService dataService, - final boolean truncateJournal) { + public ForceOverflowTask(final IDataService dataService, + final boolean compactingMerge, final boolean truncateJournal) { if (dataService == null) throw new IllegalArgumentException(); this.dataService = dataService; + this.compactingMerge = compactingMerge; + this.truncateJournal = truncateJournal; } @@ -661,8 +682,7 @@ log.info("dataService: " + dataService.getServiceName()); // returns once synchronous overflow is complete. - dataService - .forceOverflow(true/* immediate */, true/* compactingMerge */); + dataService.forceOverflow(true/* immediate */, compactingMerge); if (log.isInfoEnabled()) log.info("Synchronous overflow is done: " Modified: trunk/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -43,7 +43,6 @@ import org.apache.log4j.Logger; -import com.bigdata.concurrent.LockManager; import com.bigdata.config.LongValidator; import com.bigdata.counters.CounterSet; import com.bigdata.counters.Instrument; @@ -80,9 +79,9 @@ */ protected static final Logger log = Logger.getLogger(AbstractTransactionService.class); - protected static final boolean INFO = log.isInfoEnabled(); +// protected static final boolean INFO = log.isInfoEnabled(); - protected static final boolean DEBUG = log.isDebugEnabled(); +// protected static final boolean DEBUG = log.isDebugEnabled(); /** * Options understood by this service. @@ -91,29 +90,39 @@ * @version $Id$ */ public interface Options { - - /** - * How long you want to hold onto the database history (in milliseconds) - * or {@link Long#MAX_VALUE} for an (effectively) immortal database. The - * {@link ITransactionService} tracks the timestamp corresponding to the - * earliest running transaction (if any). When such a transaction - * exists, the actual release time is: - * - * <pre> - * releaseTime = min(earliestRunningTx, now - minimumReleaseAge) - 1 - * </pre> - * - * This ensures that history in use by running transactions is not - * released even when the minimumReleaseAge is ZERO (0). - * - * @see #DEFAULT_MIN_RELEASE_AGE - * @see #MIN_RELEASE_AGE_1H - * @see #MIN_RELEASE_AGE_1D - * @see #MIN_RELEASE_AGE_1W - * @see #MIN_RELEASE_AGE_NEVER - * - * @see AbstractTransactionService#updateReleaseTime(long) - */ + + /** + * How long you want to hold onto the database history (in milliseconds) + * or {@link Long#MAX_VALUE} for an (effectively) immortal database. The + * {@link ITransactionService} tracks the timestamp corresponding to the + * earliest running transaction (if any). 
When such a transaction + * exists, the actual release time is: + * + * <pre> + * releaseTime = min(lastCommitTime - 1, min(earliestRunningTx, now - minimumReleaseAge)) + * </pre> + * + * This ensures that history in use by running transactions is not + * released even when the minimumReleaseAge is ZERO (0). + * <p> + * When no transactions exist the actual release time is: + * + * <pre> + * releaseTime = min(commitTime - 1, now - minimumReleaseAge) + * </pre> + * + * This ensures that the the release time advances when no transactions + * are in use, but that the minimum release age is still respected. + * + * @see #DEFAULT_MIN_RELEASE_AGE + * @see #MIN_RELEASE_AGE_1H + * @see #MIN_RELEASE_AGE_1D + * @see #MIN_RELEASE_AGE_1W + * @see #MIN_RELEASE_AGE_NEVER + * + * @see AbstractTransactionService#updateReleaseTime(long) + * @see AbstractTransactionService#notifyCommit(long) + */ String MIN_RELEASE_AGE = AbstractTransactionService.class.getName() + ".minReleaseAge"; @@ -231,7 +240,7 @@ Options.MIN_RELEASE_AGE, Options.DEFAULT_MIN_RELEASE_AGE)); - if (INFO) + if (log.isInfoEnabled()) log.info(Options.MIN_RELEASE_AGE + "=" + minReleaseAge); } @@ -291,7 +300,7 @@ this.runState = newval; - if (INFO) { + if (log.isInfoEnabled()) { log.info("runState=" + runState); @@ -306,7 +315,7 @@ */ public void shutdown() { - if(INFO) + if(log.isInfoEnabled()) log.info(""); lock.lock(); @@ -376,7 +385,7 @@ long elapsed = 0L; - if(INFO) + if(log.isInfoEnabled()) log.info("activeCount="+getActiveCount()); while (getActiveCount() > 0) { @@ -390,7 +399,7 @@ // update the elapsed time. elapsed = System.nanoTime() - begin; - if(INFO) + if(log.isInfoEnabled()) log.info("No transactions remaining: elapsed="+elapsed); return; @@ -456,7 +465,7 @@ */ public void shutdownNow() { - if(INFO) + if(log.isInfoEnabled()) log.info(""); lock.lock(); @@ -769,7 +778,7 @@ */ public long getReleaseTime() { - if (DEBUG) + if (log.isDebugEnabled()) log.debug("releaseTime=" + releaseTime + ", lastKnownCommitTime=" + getLastCommitTime()); @@ -789,7 +798,7 @@ if(!lock.isHeldByCurrentThread()) throw new IllegalMonitorStateException(); - if (INFO) + if (log.isInfoEnabled()) log.info("newValue=" + newValue); this.releaseTime = newValue; @@ -911,7 +920,7 @@ } - if (INFO) + if (log.isInfoEnabled()) log.info(state.toString()); // } finally { @@ -981,10 +990,15 @@ synchronized (startTimeIndex) { - isEarliestTx = startTimeIndex.findIndexOf(timestamp) == 0; + // Note: ZERO (0) is the first tuple in the B+Tree. + // Note: MINUS ONE (-1) means that the B+Tree is empty. + final int indexOf = startTimeIndex.findIndexOf(timestamp); + + isEarliestTx = indexOf == 0; - // remove start time from the index. - startTimeIndex.remove(timestamp); + // remove start time from the index. + if (indexOf != -1) + startTimeIndex.remove(timestamp); if (!isEarliestTx) { @@ -1056,7 +1070,7 @@ */ if (this.releaseTime < releaseTime) { - if (INFO) + if (log.isInfoEnabled()) log.info("lastCommitTime=" + lastCommitTime + ", earliestTxStartTime=" + earliestTxStartTime + ", minReleaseAge=" + minReleaseAge + ", now=" @@ -1086,34 +1100,63 @@ try { - synchronized (startTimeIndex) { + updateReleaseTimeForBareCommit(commitTime); + + } finally { - if (this.releaseTime < (commitTime - 1) - && startTimeIndex.getEntryCount() == 0) { + lock.unlock(); - /* - * If there are NO active transactions and the current - * releaseTime is LT (commitTime-1) then advance the - * releaseTime to (commitTime-1). 
- */ + } - if (INFO) - log.info("Advancing releaseTime (no active tx)."); + } - setReleaseTime(commitTime - 1); + /** + * If there are NO active transactions and the current releaseTime is LT + * (commitTime-1) then compute and set the new releaseTime. + * <p> + * Note: This method was historically part of {@link #notifyCommit(long)}. + * It was moved into its own method so it can be overriden for some unit + * tests. + * + * @throws IllegalMonitorStateException + * unless the caller is holding the lock. + */ + protected void updateReleaseTimeForBareCommit(final long commitTime) { - } + if(!lock.isHeldByCurrentThread()) + throw new IllegalMonitorStateException(); + + synchronized (startTimeIndex) { - } + if (this.releaseTime < (commitTime - 1) + && startTimeIndex.getEntryCount() == 0) { - } finally { + final long lastCommitTime = commitTime; - lock.unlock(); + final long now = _nextTimestamp(); - } + final long releaseTime = Math.min(lastCommitTime - 1, now + - minReleaseAge); + if (this.releaseTime < releaseTime) { + + if (log.isInfoEnabled()) + log.info("Advancing releaseTime (no active tx)" + + ": lastCommitTime=" + lastCommitTime + + ", minReleaseAge=" + minReleaseAge + ", now=" + + now + ", releaseTime(" + this.releaseTime + + "->" + releaseTime + ")"); + + setReleaseTime(releaseTime); + + } + + } + + } + } - + /** * Return the minimum #of milliseconds of history that must be preserved. * @@ -1698,7 +1741,7 @@ * Note: The commits requests are placed into a partial order by sorting the * total set of resources which the transaction declares (via this method) * across all operations executed by the transaction and then contending for - * locks on the named resources using a {@link LockManager}. This is + * locks on the named resources using a LockManager. This is * handled by the {@link DistributedTransactionService}. */ public void declareResources(final long tx, final UUID dataServiceUUID, @@ -2061,7 +2104,7 @@ // Note: sufficient to prevent deadlocks when there are shared indices. resources.addAll(Arrays.asList(resource)); - if (INFO) + if (log.isInfoEnabled()) log.info("dataService=" + dataService + ", resource=" + Arrays.toString(resource)); @@ -2201,7 +2244,7 @@ */ public AbstractTransactionService start() { - if(INFO) + if(log.isInfoEnabled()) log.info(""); lock.lock(); Modified: trunk/bigdata/src/java/com/bigdata/service/DataService.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/DataService.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/java/com/bigdata/service/DataService.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -1845,8 +1845,8 @@ public Void call() throws Exception { - final WriteExecutorService writeService = concurrencyManager - .getWriteService(); +// final WriteExecutorService writeService = concurrencyManager +// .getWriteService(); final ResourceManager resourceManager = (ResourceManager) DataService.this.resourceManager; @@ -1859,7 +1859,8 @@ } // trigger overflow on the next group commit. 
- writeService.forceOverflow.set(true); +// writeService.forceOverflow.set(true); + resourceManager.forceOverflow.set(true); } Modified: trunk/bigdata/src/java/com/bigdata/service/DistributedTransactionService.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/DistributedTransactionService.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/java/com/bigdata/service/DistributedTransactionService.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -195,7 +195,7 @@ Options.SHAPSHOT_INTERVAL, Options.DEFAULT_SHAPSHOT_INTERVAL)); - if (INFO) + if (log.isInfoEnabled()) log.info(Options.SHAPSHOT_INTERVAL + "=" + snapshotInterval); isTransient = snapshotInterval == 0; @@ -208,7 +208,7 @@ dataDir = new File(properties.getProperty(Options.DATA_DIR)); - if (INFO) + if (log.isInfoEnabled()) log.info(Options.DATA_DIR + "=" + dataDir); } @@ -218,7 +218,7 @@ setup(); - if (INFO) + if (log.isInfoEnabled()) log.info("lastCommitTime=" + lastCommitTime + ", #commitTimes=" + commitTimeIndex.getEntryCount()); @@ -1891,7 +1891,7 @@ * themselves are serialized so that we do not miss any. */ - if (DEBUG) + if (log.isDebugEnabled()) log.debug("commitTime=" + commitTime + ", lastKnownCommitTime=" Modified: trunk/bigdata/src/resources/logging/log4j.properties =================================================================== --- trunk/bigdata/src/resources/logging/log4j.properties 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/resources/logging/log4j.properties 2010-08-05 16:29:44 UTC (rev 3414) @@ -5,9 +5,9 @@ # Default log4j configuration for testing purposes. # # You probably want to set the default log level to ERROR. -# + +#log4j.rootCategory=ALL, dest2 log4j.rootCategory=WARN, dest2 -#log4j.rootCategory=WARN, dest2 # Loggers. # Note: logging here at INFO or DEBUG will significantly impact throughput! 
@@ -66,7 +66,8 @@ #log4j.logger.com.bigdata.resources.IndexManager=INFO #log4j.logger.com.bigdata.resources.ResourceManager=INFO #log4j.logger.com.bigdata.resources.DefaultSplitHandler=INFO -log4j.logger.com.bigdata.resources.OverflowManager=INFO +#log4j.logger.com.bigdata.resources.StoreManager=INFO +#log4j.logger.com.bigdata.resources.OverflowManager=INFO log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO log4j.logger.com.bigdata.resources.AbstractResourceManagerTask=INFO #log4j.logger.com.bigdata.resources.AbstractResourceManagerTask=INFO @@ -79,7 +80,6 @@ #log4j.logger.com.bigdata.service.ndx=INFO #log4j.logger.com.bigdata.service.ndx.pipeline=DEBUG -#log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=DEBUG #log4j.logger.com.bigdata.concurrent.Latch=DEBUG #log4j.logger.com.bigdata.relation=INFO @@ -94,6 +94,7 @@ #log4j.logger.com.bigdata.relation.rule.eval.DefaultEvaluationPlan=DEBUG #log4j.logger.com.bigdata.relation.accesspath.AbstractArrayBuffer=DEBUG #log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=INFO +log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR #log4j.logger.com.bigdata.service.IBigdataFederation=DEBUG #log4j.logger.com.bigdata.service.LoadBalancerService=INFO Modified: trunk/bigdata/src/test/com/bigdata/journal/TestTransactionService.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/journal/TestTransactionService.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/test/com/bigdata/journal/TestTransactionService.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -596,15 +596,17 @@ * GT the lastCommitTime since that could allow data not yet committed to * become visible during the transaction (breaking isolation). * <p> - * A commitTime is identifed by looking up the callers timestamp in a log of + * A commitTime is identified by looking up the callers timestamp in a log of * the historical commit times and returning the first historical commit * time LTE the callers timestamp. * <p> - * The transaction start time is then choosen from the half-open interval + * The transaction start time is then chosen from the half-open interval * <i>commitTime</i> (inclusive lower bound) : <i>nextCommitTime</i> * (exclusive upper bound). * * @throws IOException + * + * @todo This test fails occasionally. I have not figured out why yet. BBT */ public void test_newTx_readOnly() throws IOException { Modified: trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -144,39 +144,43 @@ final private UUID dataServiceUUID = UUID.randomUUID(); + @Override public IBigdataFederation getFederation() { return fed; } + @Override public DataService getDataService() { throw new UnsupportedOperationException(); } + @Override public UUID getDataServiceUUID() { return dataServiceUUID; } - /** Note: no failover services. 
*/ - public UUID[] getDataServiceUUIDs() { - - return new UUID[] { dataServiceUUID }; - - } - }; txService = new MockTransactionService(properties){ + @Override protected void setReleaseTime(long releaseTime) { super.setReleaseTime(releaseTime); + if (log.isInfoEnabled()) + log + .info("Propagating new release time to the resourceManager: releaseTime=" + + releaseTime + + ", releaseAge=" + + getMinReleaseAge()); + // propagate the new release time to the resource manager. resourceManager.setReleaseTime(releaseTime); Modified: trunk/bigdata/src/test/com/bigdata/resources/TestReleaseResources.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/TestReleaseResources.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/test/com/bigdata/resources/TestReleaseResources.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -41,15 +41,6 @@ /** * Test release (aka purge) of old resources. * - * FIXME This set of unit tests needs to be updated to reflect that - * purgeResources() is invoked during synchronous overflow. The tests are - * written with a different assumption, which is why they are failing. - * - * @todo Write a unit test for purge before, during and after the 1st overflow - * and after a restart. Before, there should be nothing to release. - * During, the views that are being constructed should remain safe. After, - * we should be able to achieve a compact footprint for the data service. - * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ @@ -180,20 +171,19 @@ return properties; } - - /** - * Test creates an index whose view is initially defined by the initial - * journal on the {@link ResourceManager}. An overflow of the journal - * is then forced, which re-defines the view on the new journal. Since - * the index is very small (it is empty), it is "copied" onto the new - * journal rather than causing an index segment build to be scheduled. - * Once the asynchronous overflow completes, the original journal should - * qualify for release. - * - * @throws IOException - * @throws ExecutionException - * @throws InterruptedException - */ + + /** + * Test creates an index whose view is initially defined by the initial + * journal on the {@link ResourceManager}. An overflow of the journal is + * then forced, which re-defines the view on the new journal. Since the + * index is very small (it is empty), it is "copied" onto the new + * journal rather than causing an index segment build to be scheduled. + * The original journal should not be released. + * + * @throws IOException + * @throws ExecutionException + * @throws InterruptedException + */ public void test() throws IOException, InterruptedException, ExecutionException { @@ -204,8 +194,9 @@ final AbstractJournal j0 = resourceManager.getLiveJournal(); // force overflow on the next commit. - concurrencyManager.getWriteService().forceOverflow.set(true); - +// concurrencyManager.getWriteService().forceOverflow.set(true); + resourceManager.forceOverflow.set(true); + // disable asynchronous overflow processing to simplify the test environment. resourceManager.asyncOverflowEnabled.set(false); @@ -229,35 +220,33 @@ assertTrue(createTime0 < createTime1); // verify can still open the original journal. - // @todo unit test needs to be updated to reflect purge in sync overflow. 
- assertTrue(j0 == resourceManager.openStore(j0.getResourceMetadata() - .getUUID())); + assertTrue(j0 == resourceManager.openStore(j0.getResourceMetadata() + .getUUID())); // verify can still open the new journal. assertTrue(j1 == resourceManager.openStore(j1.getResourceMetadata() .getUUID())); - // 2 journals. + // 1 journals. assertEquals(2,resourceManager.getManagedJournalCount()); // no index segments. assertEquals(0,resourceManager.getManagedSegmentCount()); /* - * Verify that we can the first commit record when we provide the - * create time for a journal. + * Verify that the commit time index. */ - - // for j0 - assertEquals(j0.getRootBlockView().getFirstCommitTime(), - resourceManager - .getCommitTimeStrictlyGreaterThan(createTime0)); - // for j1. - assertEquals(j1.getRootBlockView().getFirstCommitTime(), - resourceManager - .getCommitTimeStrictlyGreaterThan(createTime1)); + // for j0 + assertEquals(j1.getRootBlockView().getFirstCommitTime(), + resourceManager + .getCommitTimeStrictlyGreaterThan(createTime1)); + // for j1. + assertEquals(j1.getRootBlockView().getFirstCommitTime(), + resourceManager + .getCommitTimeStrictlyGreaterThan(createTime1)); + /* * Verify that the resources required for [A] are {j0, j1} when the * probe commitTime is the timestamp when we registered [A] on [j0]. @@ -331,20 +320,20 @@ return properties; } - - /** - * Test creates an index whose view is initially defined by the initial - * journal on the sole data service. An overflow of the journal is then - * forced, which re-defines the view on the new journal. Since the index - * is very small (it is empty), it is "copied" onto the new journal - * rather than causing an index segment build to be scheduled. Once the - * asynchronous overflow completes, the original journal should qualify - * for release. - * - * @throws IOException - * @throws ExecutionException - * @throws InterruptedException - */ + + /** + * Test creates an index whose view is initially defined by the initial + * journal on the sole data service. An overflow of the journal is then + * forced, which re-defines the view on the new journal. Since the index + * is very small (it is empty), it is "copied" onto the new journal + * rather than causing an index segment build to be scheduled. The + * original journal should be released (deleted) during synchronous + * overflow processing. + * + * @throws IOException + * @throws ExecutionException + * @throws InterruptedException + */ public void test() throws IOException, InterruptedException, ExecutionException { @@ -358,8 +347,9 @@ final UUID uuid0 = j0.getResourceMetadata().getUUID(); // force overflow on the next commit. - concurrencyManager.getWriteService().forceOverflow.set(true); - +// concurrencyManager.getWriteService().forceOverflow.set(true); + resourceManager.forceOverflow.set(true); + // disable asynchronous overflow processing to simplify the test environment. resourceManager.asyncOverflowEnabled.set(false); @@ -371,39 +361,38 @@ // did overflow. assertEquals(1,resourceManager.getAsynchronousOverflowCount()); - /* - * Note: the old journal should have been closed for writes during - * synchronous overflow processing. - */ - // @todo unit test needs to be updated to reflect purge in sync overflow. - assertTrue(j0.isOpen()); // still open - assertTrue(j0.isReadOnly()); // but no longer accepts writes. +// /* +// * Note: the old journal should have been closed for writes during +// * synchronous overflow processing and deleted from the file system. 
+// */ +// assertTrue(j0.isOpen()); // still open +// assertTrue(j0.isReadOnly()); // but no longer accepts writes. +// +// /* +// * Purge old resources. If the index was copied to the new journal +// * then there should be no dependency on the old journal and it +// * should be deleted. +// */ +// { +// +// final AbstractJournal liveJournal = resourceManager +// .getLiveJournal(); +// +// final long lastCommitTime = liveJournal.getLastCommitTime(); +// +// final Set<UUID> actual = resourceManager +// .getResourcesForTimestamp(lastCommitTime); +// +// assertSameResources(new IRawStore[] {liveJournal}, actual); +// +// // only retain the lastCommitTime. +// resourceManager.setReleaseTime(lastCommitTime - 1); +// +// } +// +// resourceManager +// .purgeOldResources(1000/* ms */, false/*truncateJournal*/); - /* - * Purge old resources. If the index was copied to the new journal - * then there should be no dependency on the old journal and it - * should be deleted. - */ - { - - final AbstractJournal liveJournal = resourceManager - .getLiveJournal(); - - final long lastCommitTime = liveJournal.getLastCommitTime(); - - final Set<UUID> actual = resourceManager - .getResourcesForTimestamp(lastCommitTime); - - assertSameResources(new IRawStore[] {liveJournal}, actual); - - // only retain the lastCommitTime. - resourceManager.setReleaseTime(lastCommitTime - 1); - - } - - resourceManager - .purgeOldResources(1000/* ms */, false/*truncateJournal*/); - // verify that the old journal is no longer open. assertFalse(j0.isOpen()); @@ -475,16 +464,17 @@ super(arg0); } - - /** - * This is the minimum release time that will be used for the test. - * <P> - * Note: 20000 is 20 seconds. This is what you SHOULD use for the test. - * <p> - * Note: 200000 is 200 seconds. This can be used for debugging, but - * always restore the value so that the test will run in a reasonable - * timeframe. - */ + + /** + * This is the minimum release time that will be used for the test. + * <P> + * Note: 2000 is 2 seconds. This is what you SHOULD use for the test (it + * can be a little longer if you run into problems). + * <p> + * Note: 200000 is 200 seconds. This can be used for debugging, but + * always restore the value so that the test will run in a reasonable + * time frame. + */ final private long MIN_RELEASE_AGE = 2000; public Properties getProperties() { @@ -498,22 +488,22 @@ return properties; } - - /** - * Test where the index view is copied in its entirety onto the new - * journal and the [minReleaseAge] is 2 seconds. In this case we have no - * dependencies on the old journal, but the [minReleaseAge] is not - * satisified immediately so no resources are released during overflow - * processing (assuming that overflow processing is substantially faster - * than the [minReleaseAge]). We then wait until the [minReleaseAge] has - * passed and force overflow processing again and verify that the - * original journal was released while the 2nd and 3rd journals are - * retained. - * - * @throws IOException - * @throws ExecutionException - * @throws InterruptedException - */ + + /** + * Test where the index view is copied in its entirety onto the new + * journal and the [minReleaseAge] is 2 seconds. In this case we have no + * dependencies on the old journal, but the [minReleaseAge] is not + * satisfied immediately so no resources are released during synchronous + * overflow processing (assuming that synchronous overflow processing is + * substantially faster than the [minReleaseAge]). 
We then wait until + * the [minReleaseAge] has passed and force overflow processing again + * and verify that the original journal was released while the 2nd and + * 3rd journals are retained. + * + * @throws IOException + * @throws ExecutionException + * @throws InterruptedException + */ public void test() throws IOException, InterruptedException, ExecutionException { @@ -527,7 +517,8 @@ final UUID uuid0 = j0.getResourceMetadata().getUUID(); // force overflow on the next commit. - concurrencyManager.getWriteService().forceOverflow.set(true); +// concurrencyManager.getWriteService().forceOverflow.set(true); + resourceManager.forceOverflow.set(true); // disable asynchronous overflow processing to simplify the test environment. resourceManager.asyncOverflowEnabled.set(false); @@ -582,7 +573,8 @@ } // force overflow on the next commit. - concurrencyManager.getWriteService().forceOverflow.set(true); +// concurrencyManager.getWriteService().forceOverflow.set(true); + resourceManager.forceOverflow.set(true); // register another index - will force another overflow. registerIndex("B"); Modified: trunk/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -626,20 +626,14 @@ * and when we have not since forcing overflow will trigger * compacting merges. So you are more likely to find a problem * if you DO NOT force overflow. - * - * Note: This DOES NOT guarantee that overflow is forced on a - * given data service. it is forced if the method can gain the - * exclusive write lock for that data service. otherwise it will - * timeout and overflow processing will not be triggered on that - * data service. */ final boolean forceOverflow = false; if (forceOverflow) { System.err.println("Forcing overflow: " + new Date()); - ((AbstractScaleOutFederation<?>) federation) - .forceOverflow(true/* truncateJournal */); + ((AbstractScaleOutFederation<?>) federation) + .forceOverflow(true/* compactingMerge */, true/* truncateJournal */); System.err.println("Forced overflow: " + new Date()); Modified: trunk/bigdata/src/test/com/bigdata/service/TestDistributedTransactionServiceRestart.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/service/TestDistributedTransactionServiceRestart.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/test/com/bigdata/service/TestDistributedTransactionServiceRestart.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -75,6 +75,7 @@ } + @Override protected void setReleaseTime(final long releaseTime) { lock.lock(); @@ -102,6 +103,7 @@ } + @Override public MockDistributedTransactionService start() { super.start(); @@ -110,6 +112,18 @@ } + /** + * This is overridden to be a NOP for this test suite. The unit tests in + * this suite depend on the ability to inject specific commit times into + * the transaction service without having them "release" based on the + * actual system clock. + */ + @Override + protected void updateReleaseTimeForBareCommit(final long commitTime) { + + return; + } + /** * Exposed to the unit tests. */ @@ -129,19 +143,19 @@ * * @return The array. 
*/ - long[] toArray(CommitTimeIndex ndx) { + long[] toArray(final CommitTimeIndex ndx) { synchronized(ndx) { - long[] a = new long[ndx.getEntryCount()]; + final long[] a = new long[ndx.getEntryCount()]; - final ITupleIterator itr = ndx.rangeIterator(); + final ITupleIterator<?> itr = ndx.rangeIterator(); int i = 0; while(itr.hasNext()) { - final ITuple tuple = itr.next(); + final ITuple<?> tuple = itr.next(); a[i] = ndx.decodeKey(tuple.getKey()); @@ -160,7 +174,6 @@ * when the release time is advanced and that it is still possible to obtain * a read-only tx as of the timestamp immediately after the current release * time. - * */ public void test_setReleaseTime() { @@ -169,7 +182,10 @@ properties.setProperty(DistributedTransactionService.Options.DATA_DIR, getName()); - MockDistributedTransactionService service = new MockDistributedTransactionService( + properties.setProperty(DistributedTransactionService.Options.MIN_RELEASE_AGE, + "10"); + + final MockDistributedTransactionService service = new MockDistributedTransactionService( properties).start(); try { @@ -184,7 +200,7 @@ // verify the commit index. { - CommitTimeIndex ndx = service.getCommitTimeIndex(); + final CommitTimeIndex ndx = service.getCommitTimeIndex(); synchronized (ndx) { @@ -292,7 +308,7 @@ { - CommitTimeIndex ndx = service.getCommitTimeIndex(); + final CommitTimeIndex ndx = service.getCommitTimeIndex(); synchronized(ndx) { @@ -354,7 +370,7 @@ // verify the commit time index. { - CommitTimeIndex ndx = service.getCommitTimeIndex(); + final CommitTimeIndex ndx = service.getCommitTimeIndex(); synchronized(ndx) { Modified: trunk/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -287,6 +287,7 @@ assertNotNull(subtaskStats); + // @todo this assert fails stochastically. 
assertEquals("chunksOut", 1, subtaskStats.chunksOut.get()); assertEquals("elementsOut", 2, subtaskStats.elementsOut.get()); Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/TransactionServer.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/TransactionServer.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/TransactionServer.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -231,7 +231,7 @@ public Object getAdmin() throws RemoteException { - if (INFO) + if (log.isInfoEnabled()) log.info("" + getServiceUUID()); return server.proxy; Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/master/TaskMaster.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/master/TaskMaster.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/master/TaskMaster.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -113,19 +113,20 @@ */ public interface ConfigurationOptions { - /** - * When <code>true</code> as an after action on the job, the - * {@link DataService}s in the federation will be made to undergo - * asynchronous overflow processing and the live journals will be - * truncated so that the total size on disk of the federation is at its - * minimum footprint for the given history retention policy (default - * <code>false</code>). The master will block during this operation - * so you can readily tell when it is finished. Note that this option - * only makes sense in benchmark environments where you can contol the - * total system otherwise asynchronous writes may continue. - * - * @see AbstractScaleOutFederation#forceOverflow(boolean) - */ + /** + * When <code>true</code> as an after action on the job, the + * {@link DataService}s in the federation will be made to undergo + * asynchronous overflow processing, a compacting merge will be + * requested for all shards, and the live journals will be truncated so + * that the total size on disk of the federation is at its minimum + * footprint for the given history retention policy (default + * <code>false</code>). The master will block during this operation so + * you can readily tell when it is finished. Note that this option only + * makes sense in benchmark environments where you can control the total + * system otherwise asynchronous writes may continue. 
+ * + * @see AbstractScaleOutFederation#forceOverflow(boolean, boolean) + */ String FORCE_OVERFLOW = "forceOverflow"; /** @@ -1763,7 +1764,7 @@ System.out.println("Forcing overflow: now=" + new Date()); - fed.forceOverflow(true/* truncateJournal */); + fed.forceOverflow(true/* compactingMerge */, true/* truncateJournal */); System.out.println("Forced overflow: now=" + new Date()); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-08-05 16:29:44 UTC (rev 3414) @@ -787,7 +787,7 @@ System.out.println("Forcing overflow: now=" + new Date()); - fed.forceOverflow(true/* truncateJournal */); + fed.forceOverflow(true/* compactingMerge */, true/* truncateJournal */); System.out.println("Forced overflow: now=" + new Date()); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.config =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.config 2010-08-04 20:32:16 UTC (rev 3413) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.config 2010-08-05 16:29:44 UTC (rev 3414) @@ -9,7 +9,10 @@ // Source file or directory (required). //srcDir = new File("/data/2010_btc"); - srcDir = new File("D:/bigdata-perf-analysis/btc/btc-2009-small.nq.gz"); + //srcDir = new File("D:/bigdata-perf-analysis/btc/btc-2009-small.nq.gz"); + //srcDir = new File("/nas/data/bsbm/bsbm_2785/dataset.nt.gz"); + srcDir = new File("/nas/data/bsbm/bsbm_141624/dataset.nt.gz"); + //srcDir = new File("/nas/data/bsbm/bsbm_284826/dataset.nt.gz"); // File name filter for the srcDir (optional). srcFilter = new com.bigdata.rdf.load.RDFFilenameFilter(); @@ -18,29 +21,34 @@ * known values are: "RDF/XML", "N-Triples", "Turtle", "N3", "TriX", "TriG", * "nquads". */ - srcFormat = "nquads"; + //srcFormat = "nquads"; + srcFormat = "N-Triples"; // The output directory (required). //outDir = new File("/tmp"); - outDir = new File("D:/temp"); + //outDir = new File("D:/temp"); + //outDir = new File("/nas/data/bsbm/bsbm_284826/split"); + outDir = new File("/nas/data/bsbm/bsbm_141624/split"); + //outDir = new File("/nas/data/bsbm/bsbm_2785/split"); /* The name of the default RDFFormat for the output files (optional). The * known values are: "RDF/XML", "N-Triples", "Turtle", "N3", "TriX", "TriG", * "nquads". The default is whatever the input file was. */ - outFormat = "TriG"; + //outFormat = "TriG"; + outFormat = "N-Triples"; // The output compression mode: None, Zip, or GZip. ... [truncated message content] |
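For reference, the new two-argument overflow entry point introduced in this revision is used as follows. This is a minimal usage sketch assuming a scale-out federation handle is already available; compare the StressTestConcurrent, TaskMaster and MappedRDFDataLoadMaster call sites in the diff above.

    import com.bigdata.service.AbstractScaleOutFederation;

    final class CompactFederationSketch {

        /**
         * Request overflow with a compacting merge of every shard on every
         * data service and truncate the live journals. Per the javadoc above
         * this blocks until the data services are done, and a full compacting
         * merge is expensive and intended for benchmarking against compact
         * shards rather than for deployed federations.
         */
        static void compact(final AbstractScaleOutFederation<?> fed) {

            fed.forceOverflow(true/* compactingMerge */, true/* truncateJournal */);

        }

    }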
From: <tho...@us...> - 2010-08-05 20:04:35
|
Revision: 3417 http://bigdata.svn.sourceforge.net/bigdata/?rev=3417&view=rev Author: thompsonbry Date: 2010-08-05 20:04:28 +0000 (Thu, 05 Aug 2010) Log Message: ----------- Added init() to ILocatableResource to allow one time initialization logic after the constructor invocation. This is a good pattern in general and was used to fix an initialization issue within the LexiconRelation and its inner ILexiconConfiguration object. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/bfs/BigdataFileSystem.java trunk/bigdata/src/java/com/bigdata/relation/AbstractResource.java trunk/bigdata/src/java/com/bigdata/relation/IMutableResource.java trunk/bigdata/src/java/com/bigdata/relation/RelationFusedView.java trunk/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java trunk/bigdata/src/java/com/bigdata/relation/locator/ILocatableResource.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/AbstractRuleFastClosure_3_5_6_7_9.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/RDFJoinNexus.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/SampleExtensionFactory.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestAll.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithEmbeddedFederation.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithJiniFederation.java trunk/build.xml Modified: trunk/bigdata/src/java/com/bigdata/bfs/BigdataFileSystem.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/bfs/BigdataFileSystem.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata/src/java/com/bigdata/bfs/BigdataFileSystem.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -420,7 +420,7 @@ } } - + /** * Note: A commit is required in order for a read-committed view to have * access to the registered indices. When running against an Modified: trunk/bigdata/src/java/com/bigdata/relation/AbstractResource.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/relation/AbstractResource.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata/src/java/com/bigdata/relation/AbstractResource.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -582,9 +582,21 @@ } /** + * The default implementation only logs the event. + */ + public AbstractResource<E> init() { + + if (log.isInfoEnabled()) + log.info(toString()); + + return this; + + } + + /** * * @todo Lock service supporting shared locks, leases and lease renewal, - * excalation of shared locks to exclusive locks, deadlock detection, + * escalation of shared locks to exclusive locks, deadlock detection, * and possibly a resource hierarchy. 
Leases should be Callable * objects that are submitted by the client to its executor service so * that they will renew automatically until cancelled (and will cancel Modified: trunk/bigdata/src/java/com/bigdata/relation/IMutableResource.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/relation/IMutableResource.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata/src/java/com/bigdata/relation/IMutableResource.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -38,7 +38,10 @@ public interface IMutableResource<T> extends ILocatableResource<T> { /** - * Create any logically contained resources (relations, indices). + * Create any logically contained resources (relations, indices). There is + * no presumption that {@link #init()} is suitable for invocation from + * {@link #create()}. Instead, you are responsible for invoking {@link #init()} + * from this method IFF it is appropriate to reuse its initialization logic. */ void create(); Modified: trunk/bigdata/src/java/com/bigdata/relation/RelationFusedView.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/relation/RelationFusedView.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata/src/java/com/bigdata/relation/RelationFusedView.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -21,8 +21,8 @@ */ public class RelationFusedView<E> implements IRelation<E> { - private IRelation<E> relation1; - private IRelation<E> relation2; + final private IRelation<E> relation1; + final private IRelation<E> relation2; public IRelation<E> getRelation1() { @@ -36,6 +36,13 @@ } + // NOP + public RelationFusedView<E> init() { + + return this; + + } + /** * * @param relation1 Modified: trunk/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -586,6 +586,8 @@ properties // }); + r.init(); + if(INFO) { log.info("new instance: "+r); Modified: trunk/bigdata/src/java/com/bigdata/relation/locator/ILocatableResource.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/relation/locator/ILocatableResource.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata/src/java/com/bigdata/relation/locator/ILocatableResource.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -45,6 +45,13 @@ */ public interface ILocatableResource<T> { + /** + * Deferred initialization method is automatically invoked when the resource + * is materialized by the {@link IResourceLocator}. The implementation is + * encouraged to strengthen the return type. + */ + public ILocatableResource<T> init(); + /** * The identifying namespace. 
*/ Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -73,26 +73,18 @@ } - public void initExtensions(final IDatatypeURIResolver resolver) { + public void initExtensions(final IDatatypeURIResolver resolver) { + + xFactory.init(resolver); + + for (IExtension extension : xFactory.getExtensions()) { + BigdataURI datatype = extension.getDatatype(); + if (datatype == null) + continue; + termIds.put((TermId) datatype.getIV(), extension); + datatypes.put(datatype.stringValue(), extension); + } - xFactory.init(resolver); - - /* - * Hacky way to know we haven't been initialized yet without using - * non-final variables. - */ - if (termIds.size() == 0 && xFactory.getExtensions().length > 0) { - - for (IExtension extension : xFactory.getExtensions()) { - BigdataURI datatype = extension.getDatatype(); - if (datatype == null) - continue; - termIds.put((TermId) datatype.getIV(), extension); - datatypes.put(datatype.stringValue(), extension); - } - - } - } public V asValue(final ExtensionIV iv, final BigdataValueFactory vf) { Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -45,6 +45,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import org.apache.log4j.Logger; import org.omg.CORBA.portable.ValueFactory; @@ -99,6 +100,7 @@ import com.bigdata.relation.AbstractRelation; import com.bigdata.relation.accesspath.IAccessPath; import com.bigdata.relation.accesspath.IElementFilter; +import com.bigdata.relation.locator.ILocatableResource; import com.bigdata.relation.locator.IResourceLocator; import com.bigdata.relation.rule.IBindingSet; import com.bigdata.relation.rule.IPredicate; @@ -447,6 +449,22 @@ } + @Override + public LexiconRelation init() { + + super.init(); + + /* + * Allow the extensions to resolve their datatype URIs into term + * identifiers. + */ + lexiconConfiguration.initExtensions(this); + + return this; + + } + + @Override public void create() { final IResourceLock resourceLock = acquireExclusiveLock(); @@ -487,6 +505,12 @@ // // assert id2term != null; + /* + * Allow the extensions to resolve their datatype URIs into term + * identifiers. + */ + lexiconConfiguration.initExtensions(this); + } finally { unlock(resourceLock); @@ -706,6 +730,9 @@ tmp = (ITextIndexer) gi.invoke(null/* object */, getIndexManager(), getNamespace(), getTimestamp(), getProperties()); + if(tmp instanceof ILocatableResource<?>) { + ((ILocatableResource<?>)tmp).init(); + } // new FullTextIndex(getIndexManager(), // getNamespace(), getTimestamp(), getProperties()) viewRef.set(tmp); @@ -2673,15 +2700,6 @@ // // } - /* - * Allow the extensions to resolve their datatype URIs into - * term identifiers. Unfortunately no way to tell whether to call this - * or not without using non-final variables. 
The configuration will - * have to be responsible for determining whether they are initialized - * or not (again using only final variables). Hacky. - */ - lexiconConfiguration.initExtensions(this); - return lexiconConfiguration; } Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/AbstractRuleFastClosure_3_5_6_7_9.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/AbstractRuleFastClosure_3_5_6_7_9.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/AbstractRuleFastClosure_3_5_6_7_9.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -450,7 +450,8 @@ return new RelationFusedView<ISPO>( // (IRelation<ISPO>)resourceLocator.locate(database, timestamp0), - (IRelation<ISPO>)resourceLocator.locate(focusStore, timestamp1)); + (IRelation<ISPO>)resourceLocator.locate(focusStore, timestamp1)) + .init(); } // final IAccessPath accessPath = (focusStore == null // Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/RDFJoinNexus.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/RDFJoinNexus.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/rules/RDFJoinNexus.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -734,7 +734,7 @@ final IRelation relation1 = (IRelation) resourceLocator.locate( relationName1, readTimestamp);//timestamp1); - relation = new RelationFusedView(relation0, relation1); + relation = new RelationFusedView(relation0, relation1).init(); } else { Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -1222,7 +1222,17 @@ } } + + @Override + public AbstractTripleStore init() { + super.init(); + + return this; + + } + + @Override public void create() { assertWritable(); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -158,7 +158,7 @@ /** * Create or re-open a triple store using a local embedded database. 
*/ - public LocalTripleStore(Properties properties) { + /* public */LocalTripleStore(final Properties properties) { /* * FIXME This should pass up the existing properties for the KB instance @@ -189,6 +189,10 @@ create(); + } else { + + init(); + } } Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/SampleExtensionFactory.java =================================================================== --- trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/SampleExtensionFactory.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/SampleExtensionFactory.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -22,11 +22,9 @@ public void init(final IDatatypeURIResolver resolver) { - if (extensions.size() == 0) { - extensions.add(new EpochExtension(resolver)); - extensions.add(new ColorsEnumExtension(resolver)); - extensionsArray = extensions.toArray(new IExtension[2]); - } + extensions.add(new EpochExtension(resolver)); + extensions.add(new ColorsEnumExtension(resolver)); + extensionsArray = extensions.toArray(new IExtension[2]); } Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestAll.java =================================================================== --- trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestAll.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestAll.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -87,8 +87,8 @@ * @todo We should run this test suite against a CI cluster on a single machine using * the full bigdata federation rather than EDS. */ - suite.addTest(com.bigdata.rdf.store.TestScaleOutTripleStoreWithEmbeddedFederation - .suite()); +// suite.addTest(com.bigdata.rdf.store.TestScaleOutTripleStoreWithEmbeddedFederation +// .suite()); // if (Boolean.parseBoolean(System.getProperty("maven.test.services.skip", // "false")) Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithEmbeddedFederation.java =================================================================== --- trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithEmbeddedFederation.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithEmbeddedFederation.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -256,7 +256,7 @@ ITx.UNISOLATED, store.getProperties() // client.getProperties() - ); + ).init(); } Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithJiniFederation.java =================================================================== --- trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithJiniFederation.java 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithJiniFederation.java 2010-08-05 20:04:28 UTC (rev 3417) @@ -240,7 +240,7 @@ // obtain view of the triple store. return new ScaleOutTripleStore(helper.client.getFederation(), - namespace, ITx.UNISOLATED, store.getProperties()); + namespace, ITx.UNISOLATED, store.getProperties()).init(); } Modified: trunk/build.xml =================================================================== --- trunk/build.xml 2010-08-05 18:11:38 UTC (rev 3416) +++ trunk/build.xml 2010-08-05 20:04:28 UTC (rev 3417) @@ -1351,6 +1351,12 @@ <fileset dir="${bigdata.dir}/src/resources/scripts" /> </copy> + <!-- Copy the analysis tools. The queries directory is required for extractCounters.sh. 
--> + <mkdir dir="${build.dir}/src/resources/analysis" /> + <copy toDir="${build.dir}/src/resources/analysis"> + <fileset dir="${bigdata.dir}/src/resources/analysis" /> + </copy> + <copy tofile="${build.dir}/build.properties" file="build.properties" /> <copy tofile="${build.dir}/build.xml" file="build.xml" /> <copy tofile="${build.dir}/LICENSE.txt" file="LICENSE.txt" /> @@ -1988,6 +1994,13 @@ <!-- changing the appropriate property value above is all --> <!-- that should be required. --> + <!-- This is a workaround for a JVM bug which can result in a --> + <!-- lost wakeup. This bug is fixed in JDK1.6.0_18. However, --> + <!-- JDK1.6.0_18 has other problems which result in segfaults. --> + <!-- --> + <!-- http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6822370 --> + <jvmarg value="-XX:+UseMembar"/> + <sysproperty key="log4j.path" value="${bigdata.test.log4j.abs.path}"/> <sysproperty key="app.home" value="${app.home}"/> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
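The init() pattern introduced by r3417 is easiest to see in isolation. The sketch below is illustrative only and does not use the real bigdata types: it mimics how DefaultResourceLocator now materializes a resource and then invokes init() exactly once, and how implementations are encouraged to strengthen the return type so callers can chain the call. The names LocatableResource, ExampleRelation and ExampleLocator are made up for this sketch.

    // Illustrative sketch only -- not the bigdata API. It mimics the pattern that
    // r3417 introduces: the locator materializes a resource, then calls init()
    // exactly once, and implementations strengthen the return type of init().
    interface LocatableResource<T> {

        // Deferred, one-time initialization invoked after the constructor returns.
        LocatableResource<T> init();
    }

    class ExampleRelation implements LocatableResource<ExampleRelation> {

        private boolean initialized;

        // The constructor stays cheap; work that needs the fully constructed
        // object (wiring up an inner configuration, resolving other resources)
        // moves into init(), as LexiconRelation does with initExtensions(this).
        ExampleRelation() {
        }

        @Override
        public ExampleRelation init() { // covariant return, so callers can chain
            if (!initialized) {
                initialized = true;
            }
            return this;
        }
    }

    class ExampleLocator {

        // Mirrors the change to DefaultResourceLocator: construct, init(), return.
        ExampleRelation locate() {
            return new ExampleRelation().init();
        }
    }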
From: <tho...@us...> - 2010-08-06 10:46:56
|
Revision: 3418 http://bigdata.svn.sourceforge.net/bigdata/?rev=3418&view=rev Author: thompsonbry Date: 2010-08-06 10:46:50 +0000 (Fri, 06 Aug 2010) Log Message: ----------- This implements a workaround for the problem reported by https://sourceforge.net/apps/trac/bigdata/ticket/111 and https://sourceforge.net/apps/trac/bigdata/ticket/94. The underlying problem is described in https://sourceforge.net/apps/trac/bigdata/ticket/111 and is a problem in the rules-based service startup logic. The workaround removes the TXRunningConstraint from the configuration files and modifies the StoreManager to wait for the transaction service to be discovered before proceeding with its startup. The changes made by this workaround are appropriate and close out https://sourceforge.net/apps/trac/bigdata/ticket/94, but I am going to leave https://sourceforge.net/apps/trac/bigdata/ticket/111 open since it documents the underlying problem which has not been resolved. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/lookup/AbstractCachingServiceClient.java trunk/src/resources/config/bigdataCluster.config trunk/src/resources/config/bigdataCluster16.config Modified: trunk/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java 2010-08-05 20:04:28 UTC (rev 3417) +++ trunk/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java 2010-08-06 10:46:50 UTC (rev 3418) @@ -7,6 +7,7 @@ import com.bigdata.counters.CounterSet; import com.bigdata.counters.Instrument; +import com.bigdata.resources.StoreManager; import com.bigdata.service.IBigdataFederation; import com.bigdata.service.IDataService; @@ -171,16 +172,18 @@ * Delay between attempts reach the remote service (ms). */ final long delay = 10L; - - /** - * #of attempts to reach the remote service. - * - * Note: delay*maxtries == 1000ms of trying before we give up. - * - * If this is not enough, then consider adding an optional parameter giving - * the time the caller will wait and letting the StoreManager wait longer - * during startup to discover the timestamp service. - */ + + /** + * #of attempts to reach the remote service. + * <p> + * Note: delay*maxtries == 1000ms of trying before we give up, plus however + * long we are willing to wait for service discovery if the problem is + * locating the {@link ITransactionService}. + * <p> + * If this is not enough, then consider adding an optional parameter giving + * the time the caller will wait and letting the {@link StoreManager} wait + * longer during startup to discover the timestamp service. + */ final int maxtries = 100; /** Modified: trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java 2010-08-05 20:04:28 UTC (rev 3417) +++ trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java 2010-08-06 10:46:50 UTC (rev 3418) @@ -1430,6 +1430,13 @@ } } + while (true) { + if (getFederation().getTransactionService() != null) { + break; + } + log.warn("Waiting for transaction service discovery"); + } + /* * Look for pre-existing data files. 
*/ Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/lookup/AbstractCachingServiceClient.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/lookup/AbstractCachingServiceClient.java 2010-08-05 20:04:28 UTC (rev 3417) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/lookup/AbstractCachingServiceClient.java 2010-08-06 10:46:50 UTC (rev 3418) @@ -211,7 +211,7 @@ * Return an arbitrary service from the cache -or- <code>null</code> if * there is no such service in the cache and a remote lookup times out. */ - @SuppressWarnings("unchecked") +// @SuppressWarnings("unchecked") final public S getService() { return getService(filter); @@ -227,16 +227,18 @@ * An optional filter. If given it will be applied in addition to * the optional filter specified to the ctor. */ - @SuppressWarnings("unchecked") final public S getService(final ServiceItemFilter filter) { - ServiceItem item = getServiceItem(filter); + final ServiceItem item = getServiceItem(filter); - if (item != null) - return (S) item.service; - else - return null; - + if (item == null) + return null; + + @SuppressWarnings("unchecked") + final S service = (S)item.service; + + return service; + } /** @@ -302,7 +304,7 @@ try { - item = getServiceDiscoveryManager().lookup(template, filter, + item = serviceDiscoveryManager.lookup(template, filter, cacheMissTimeout); } catch (RemoteException ex) { Modified: trunk/src/resources/config/bigdataCluster.config =================================================================== --- trunk/src/resources/config/bigdataCluster.config 2010-08-05 20:04:28 UTC (rev 3417) +++ trunk/src/resources/config/bigdataCluster.config 2010-08-06 10:46:50 UTC (rev 3418) @@ -700,7 +700,7 @@ new JiniRunningConstraint(), new ZookeeperRunningConstraint(), - new TXRunningConstraint(), + //new TXRunningConstraint(), new HostAllowConstraint(bigdata.mds), @@ -758,6 +758,10 @@ * have for your applications! */ "-Xmx1600m",// was 800 + /* Optionally, grab all/most of the max heap at once. This makes sense for + * DS but is less necessary for other bigdata services. + */ + "-Xms800m", // 1/2 of the max heap is a good value. /* * This option will keep the JVM "alive" even when it is memory starved * but perform of a memory starved JVM is terrible. @@ -807,7 +811,7 @@ new JiniRunningConstraint(), new ZookeeperRunningConstraint(), - new TXRunningConstraint(), + //new TXRunningConstraint(), new HostAllowConstraint(bigdata.ds), Modified: trunk/src/resources/config/bigdataCluster16.config =================================================================== --- trunk/src/resources/config/bigdataCluster16.config 2010-08-05 20:04:28 UTC (rev 3417) +++ trunk/src/resources/config/bigdataCluster16.config 2010-08-06 10:46:50 UTC (rev 3418) @@ -745,7 +745,7 @@ new JiniRunningConstraint(), new ZookeeperRunningConstraint(), - new TXRunningConstraint(), + //new TXRunningConstraint(), new HostAllowConstraint(bigdata.mds), @@ -814,7 +814,9 @@ */ "-Xmx9G", // Note: out of 32 available! /* Optionally, grab all/most of the max heap at once. This makes sense for - * DS, but is less necessary for other bigdata services. + * DS, but is less necessary for other bigdata services. If the machine is + * dedicated to the DataService then use the maximum heap. Otherwise 1/2 of + * the maximum heap is a good value. 
*/ "-Xms9G", /* @@ -888,7 +890,7 @@ new JiniRunningConstraint(), new ZookeeperRunningConstraint(), - new TXRunningConstraint(), + //new TXRunningConstraint(), new HostAllowConstraint(bigdata.ds), This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
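The r3418 workaround replaces the TXRunningConstraint with a discovery wait inside StoreManager: startup simply does not proceed until the federation reports a transaction service. The committed loop warns and retries without a bound; the sketch below only illustrates the same idea with a polling interval and an overall timeout, and the names DiscoveryWait, Federation and awaitTransactionService are assumptions for this sketch rather than the real IBigdataFederation API.

    import java.util.concurrent.TimeUnit;

    // Illustrative sketch only: a bounded variant of the discovery wait that the
    // committed StoreManager change expresses as an unbounded warn-and-retry loop.
    final class DiscoveryWait {

        interface Federation {
            Object getTransactionService(); // null until the service is discovered
        }

        static void awaitTransactionService(final Federation fed,
                final long timeoutMillis) throws InterruptedException {

            final long deadline = System.currentTimeMillis() + timeoutMillis;

            while (fed.getTransactionService() == null) {

                if (System.currentTimeMillis() >= deadline)
                    throw new IllegalStateException(
                            "Transaction service not discovered within "
                                    + timeoutMillis + "ms");

                // Poll rather than spin; the committed loop logs a warning instead.
                TimeUnit.MILLISECONDS.sleep(100);
            }
        }
    }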
From: <tho...@us...> - 2010-08-06 15:02:26
|
Revision: 3419 http://bigdata.svn.sourceforge.net/bigdata/?rev=3419&view=rev Author: thompsonbry Date: 2010-08-06 15:02:14 +0000 (Fri, 06 Aug 2010) Log Message: ----------- Bug fix to some unit tests which were failing due to the changed initialization in StoreManager. Added the jgrapht dependency to the top-level build.xml file to fix some unit test failures. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java trunk/bigdata/src/java/com/bigdata/service/AbstractFederation.java trunk/build.xml Modified: trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java 2010-08-06 10:46:50 UTC (rev 3418) +++ trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java 2010-08-06 15:02:14 UTC (rev 3419) @@ -1430,16 +1430,31 @@ } } - while (true) { - if (getFederation().getTransactionService() != null) { - break; + try { + final IBigdataFederation<?> fed = getFederation(); + if (fed == null) { + /* + * Some of the unit tests do not start the txs until after + * the DataService. For those unit tests getFederation() + * will return null during startup() of the DataService. To + * have a common code path, we throw the exception here + * which is caught below. + */ + throw new UnsupportedOperationException(); } - log.warn("Waiting for transaction service discovery"); + while (true) { + if (fed.getTransactionService() != null) { + break; + } + log.warn("Waiting for transaction service discovery"); + } + } catch (UnsupportedOperationException ex) { + log.warn("Federation not available - running in test case?"); } - - /* - * Look for pre-existing data files. - */ + + /* + * Look for pre-existing data files. + */ if (!isTransient) { if (log.isInfoEnabled()) Modified: trunk/bigdata/src/java/com/bigdata/service/AbstractFederation.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/AbstractFederation.java 2010-08-06 10:46:50 UTC (rev 3418) +++ trunk/bigdata/src/java/com/bigdata/service/AbstractFederation.java 2010-08-06 15:02:14 UTC (rev 3419) @@ -829,7 +829,7 @@ } /** - * Delegated. + * Delegated. {@inheritDoc} */ public T getService() { @@ -840,7 +840,7 @@ } /** - * Delegated. + * Delegated. {@inheritDoc} */ public String getServiceName() { @@ -851,7 +851,7 @@ } /** - * Delegated. + * Delegated. {@inheritDoc} */ public Class getServiceIface() { @@ -862,7 +862,7 @@ } /** - * Delegated. + * Delegated. {@inheritDoc} */ public UUID getServiceUUID() { @@ -873,7 +873,7 @@ } /** - * Delegated. + * Delegated. {@inheritDoc} */ public boolean isServiceReady() { @@ -894,7 +894,7 @@ } /** - * Delegated. + * Delegated. {@inheritDoc} */ public void reattachDynamicCounters() { @@ -905,7 +905,7 @@ } /** - * Delegated. + * Delegated. {@inheritDoc} */ public void didStart() { @@ -916,7 +916,7 @@ } /** - * Delegated. + * Delegated. {@inheritDoc} */ public AbstractHTTPD newHttpd(final int httpdPort, final CounterSet counterSet) throws IOException { @@ -927,7 +927,10 @@ } - public void serviceJoin(IService service, UUID serviceUUID) { + /** + * Delegated. {@inheritDoc} + */ + public void serviceJoin(final IService service, final UUID serviceUUID) { if (!isOpen()) return; @@ -941,7 +944,10 @@ } - public void serviceLeave(UUID serviceUUID) { + /** + * Delegated. 
{@inheritDoc} + */ + public void serviceLeave(final UUID serviceUUID) { if(!isOpen()) return; @@ -1129,9 +1135,9 @@ // notify delegates that deferred startup has occurred. AbstractFederation.this.didStart(); + } - /** * Setup sampling on the client's thread pool. This collects interesting * statistics about the thread pool for reporting to the load balancer Modified: trunk/build.xml =================================================================== --- trunk/build.xml 2010-08-06 10:46:50 UTC (rev 3418) +++ trunk/build.xml 2010-08-06 15:02:14 UTC (rev 3419) @@ -39,8 +39,8 @@ <target name="clean" description="cleans everything in [build.dir], but not the releases."> <delete dir="${build.dir}" /> - <delete dir="${bigdata.dir}/bigdata-test" quiet="true" /> - <delete dir="${bigdata.dir}/dist" quiet="true" /> + <delete dir="${bigdata.dir}/bigdata-test" quiet="true" /> + <delete dir="${bigdata.dir}/dist" quiet="true" /> </target> <target name="prepare"> @@ -53,7 +53,7 @@ <istrue value="${snapshot}" /> </condition> <condition property="osgi.version" value="${build.ver.osgi}.${osgiDate}" else="${build.ver.osgi}.0"> - <istrue value="${snapshot}" /> + <istrue value="${snapshot}" /> </condition> <!--<echo message="today=${today}"/>--> <echo message="version=${version}" /> @@ -69,10 +69,7 @@ <!-- I set the target to 1.5 to support deployment on non-1.6 JVMs. --> <target name="compile" depends="prepare"> <mkdir dir="${build.dir}" /> - <javac destdir="${build.dir}/classes" classpathref="build.classpath" - debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" - encoding="${javac.encoding}" - > + <javac destdir="${build.dir}/classes" classpathref="build.classpath" debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" encoding="${javac.encoding}"> <!-- note: must also specify -bootclasspath and -extdirs when cross-compiling --> <!-- target="${javac.target}" source="${javac.source}" --> <src path="${bigdata.dir}/bigdata/src/java" /> @@ -85,7 +82,7 @@ <src path="${bigdata.dir}/bigdata-rdf/src/test"/> <src path="${bigdata.dir}/bigdata-sails/src/test"/> --> - <compilerarg value="-version"/> + <compilerarg value="-version" /> </javac> <!-- copy resources. --> <copy toDir="${build.dir}/classes"> @@ -107,15 +104,14 @@ <exclude name="**/package.html" /> </fileset> <fileset dir="${bigdata.dir}/bigdata-sails/src/resources/sesame-server"> - <include name="META-INF/**"/> + <include name="META-INF/**" /> </fileset> </copy> </target> <!-- Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory. --> - <target name="bundleJar" depends="bundle, jar" - description="Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory."> - <copy file="${build.dir}/${version}.jar" todir="${build.dir}/lib"/> + <target name="bundleJar" depends="bundle, jar" description="Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory."> + <copy file="${build.dir}/${version}.jar" todir="${build.dir}/lib" /> <!--<property name="myclasspath" refid="runtime.classpath" /> <echo message="${myclasspath}"/>--> </target> @@ -124,114 +120,55 @@ See 'bundleJar'. 
--> <target name="jar" depends="compile" description="Generates the jar (see also bundleJar)."> <jar destfile="${build.dir}/${version}.jar"> - <fileset dir="${build.dir}/classes" - excludes="test/**" /> + <fileset dir="${build.dir}/classes" excludes="test/**" /> <manifest> <!--<attribute name="Main-Class" value="com/bigdata/rdf/rio/TestRioIntegration"/>--> </manifest> </jar> </target> - - + + <!-- This generates an osgi bundle jar, and does not bundled the dependencies. See 'bundleJar'. --> <target name="osgi" depends="compile, bundle" description="Generates the osgi bundle jar (see also bundleJar)."> - <taskdef resource="aQute/bnd/ant/taskdef.properties" classpath="bigdata/lib/bnd-0.0.384.jar"/> + <taskdef resource="aQute/bnd/ant/taskdef.properties" classpath="bigdata/lib/bnd-0.0.384.jar" /> <mkdir dir="${build.dir}/bundles" /> <jar destfile="${build.dir}/bundles/com.bigdata.source_${osgi.version}.jar"> - <manifest> - <attribute name="Eclipse-SourceBundle" value='com.bigdata;version="${osgi.version}";roots="."'/> - <attribute name="Bundle-Vendor" value="Systap"/> - <attribute name="Bundle-Version" value="${build.ver.osgi}"/> - <attribute name="Bundle-ManifestVersion" value="2"/> - <attribute name="Bundle-SymbolicName" value="com.bigdata.source"/> - <attribute name="Bundle-DocURL" value="http://www.bigdata.com"/> - <attribute name="Bundle-Description" value="Bigdata Source"/> - </manifest> - <fileset dir="bigdata/src/java"/> - <fileset dir="bigdata/src/java" /> - <fileset dir="bigdata-jini/src/java" /> - <fileset dir="bigdata-rdf/src/java" /> - <fileset dir="bigdata-sails/src/java" /> - </jar> - <bnd - output="${build.dir}/bundles/com.bigata-${osgi.version}.jar" - classpath="${build.dir}/classes" - eclipse="false" - failok="false" - exceptions="true" - files="${basedir}/osgi/bigdata.bnd"/> - - <bndwrap - jars="${build.dir}/lib/unimi/colt-1.2.0.jar" - output="${build.dir}/bundles/colt-1.2.0.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/unimi/fastutil-5.1.5.jar" - output="${build.dir}/bundles/fastutil-5.1.5.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/ctc_utils-5-4-2005.jar" - output="${build.dir}/bundles/ctc_utils-5-4-2005.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/cweb-commons-1.1-b2-dev.jar" - output="${build.dir}/bundles/cweb-commons-1.1.2.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/cweb-extser-0.1-b2-dev.jar" - output="${build.dir}/bundles/cweb-extser-1.1.2.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/dsi-utils-1.0.6-020610.jar" - output="${build.dir}/bundles/dsi-utils-1.0.6-020610.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/iris-0.58.jar" - output="${build.dir}/bundles/iris-0.58.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/jgrapht-jdk1.5-0.7.1.jar" - output="${build.dir}/bundles/jgrapht-jdk1.5-0.7.1.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/lgpl-utils-1.0.6-020610.jar" - output="${build.dir}/bundles/lgpl-utils-1.0.6-020610.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/high-scale-lib-v1.1.2.jar" - output="${build.dir}/bundles/high-scale-lib-v1.1.2.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/openrdf-sesame-2.3.0-onejar.jar" - output="${build.dir}/bundles/openrdf-sesame-2.3.0.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - 
jars="${build.dir}/lib/apache/zookeeper-3.2.1.jar" - output="${build.dir}/bundles/zookeeper-3.2.1.jar" - definitions="${basedir}/osgi/" - /> - <bndwrap - jars="${build.dir}/lib/nxparser-6-22-2010.jar" - output="${build.dir}/bundles/nxparser-2010.6.22.jar" - definitions="${basedir}/osgi/" - /> + <manifest> + <attribute name="Eclipse-SourceBundle" value='com.bigdata;version="${osgi.version}";roots="."' /> + <attribute name="Bundle-Vendor" value="Systap" /> + <attribute name="Bundle-Version" value="${build.ver.osgi}" /> + <attribute name="Bundle-ManifestVersion" value="2" /> + <attribute name="Bundle-SymbolicName" value="com.bigdata.source" /> + <attribute name="Bundle-DocURL" value="http://www.bigdata.com" /> + <attribute name="Bundle-Description" value="Bigdata Source" /> + </manifest> + <fileset dir="bigdata/src/java" /> + <fileset dir="bigdata/src/java" /> + <fileset dir="bigdata-jini/src/java" /> + <fileset dir="bigdata-rdf/src/java" /> + <fileset dir="bigdata-sails/src/java" /> + </jar> + <bnd output="${build.dir}/bundles/com.bigata-${osgi.version}.jar" classpath="${build.dir}/classes" eclipse="false" failok="false" exceptions="true" files="${basedir}/osgi/bigdata.bnd" /> + + <bndwrap jars="${build.dir}/lib/unimi/colt-1.2.0.jar" output="${build.dir}/bundles/colt-1.2.0.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/unimi/fastutil-5.1.5.jar" output="${build.dir}/bundles/fastutil-5.1.5.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/ctc_utils-5-4-2005.jar" output="${build.dir}/bundles/ctc_utils-5-4-2005.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/cweb-commons-1.1-b2-dev.jar" output="${build.dir}/bundles/cweb-commons-1.1.2.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/cweb-extser-0.1-b2-dev.jar" output="${build.dir}/bundles/cweb-extser-1.1.2.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/dsi-utils-1.0.6-020610.jar" output="${build.dir}/bundles/dsi-utils-1.0.6-020610.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/iris-0.58.jar" output="${build.dir}/bundles/iris-0.58.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/jgrapht-jdk1.5-0.7.1.jar" output="${build.dir}/bundles/jgrapht-jdk1.5-0.7.1.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/lgpl-utils-1.0.6-020610.jar" output="${build.dir}/bundles/lgpl-utils-1.0.6-020610.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/high-scale-lib-v1.1.2.jar" output="${build.dir}/bundles/high-scale-lib-v1.1.2.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/openrdf-sesame-2.3.0-onejar.jar" output="${build.dir}/bundles/openrdf-sesame-2.3.0.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/apache/zookeeper-3.2.1.jar" output="${build.dir}/bundles/zookeeper-3.2.1.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/nxparser-6-22-2010.jar" output="${build.dir}/bundles/nxparser-2010.6.22.jar" definitions="${basedir}/osgi/" /> </target> - - + + <!-- Note: the javadoc requires a LOT of RAM, but runs quickly on a server class machine. 
@@ -241,7 +178,7 @@ <target name="javadoc" depends="prepare" if="javadoc"> <mkdir dir="${build.dir}/docs/api" /> <javadoc destdir="${build.dir}/docs/api" defaultexcludes="yes" author="true" version="true" use="true" overview="../bigdata/overview.html" windowtitle="bigdata®" classpathref="build.classpath"> - <arg value="-J-Xmx1000m"/> + <arg value="-J-Xmx1000m" /> <packageset dir="${bigdata.dir}/bigdata/src/java" /> <packageset dir="${bigdata.dir}/bigdata-jini/src/java" /> <packageset dir="${bigdata.dir}/bigdata-rdf/src/java" /> @@ -413,7 +350,7 @@ </condition> </fail> <input message="username:" addproperty="ssh.username2" defaultValue="${ssh.username2}" /> - <!-- +<!-- <input message="password:" addproperty="ssh.password2" defaultValue="${ssh.password2}" /> --> <exec executable="${ssh.scp}"> @@ -437,18 +374,17 @@ <target name="banner" depends="jar" description="Displays the banner (verifies runtime classpath)."> - <java classname="com.bigdata.Banner" - failonerror="true" fork="false" logerror="true"> - <classpath refid="runtime.classpath" /> - </java> +<java classname="com.bigdata.Banner" failonerror="true" fork="false" logerror="true"> + <classpath refid="runtime.classpath" /> +</java> </target> - <!-- --> - <!-- CLUSTER INSTALL TARGETS --> - <!-- --> - +<!-- --> +<!-- CLUSTER INSTALL TARGETS --> +<!-- --> + <!-- This is the cluster-based install. You need to edit build.properties, decide @@ -611,8 +547,8 @@ <!-- set execute bit for scripts in this directory (must be the last step). --> <chmod perm="u+x,g+rx,o-rwx"> <fileset dir="${install.bin.dir}"> - <exclude name="README"/> - <exclude name="POST-INSTALL"/> + <exclude name="README" /> + <exclude name="POST-INSTALL" /> </fileset> </chmod> <!-- Setup the status file which will be read by the bigdata script and @@ -628,20 +564,19 @@ <chmod perm="g+rw,o-rw" file="${stateFile}" /> <chmod perm="g+rw,o-rw" file="${stateLog}" /> <!-- Make sure that the entire shared directory structure is read/write for the group. --> -<chmod perm="g+rwx" type="both" dir="${NAS}" verbose="true"/> +<chmod perm="g+rwx" type="both" dir="${NAS}" verbose="true" /> <!-- Make sure that it is all accessible to the install group (ant 1.6+ plus extension module required). <chown file="${NAS}" type="both" owner="${install.user}.${install.group}" verbose="true"/> --> <!-- Works for earlier versions of ant LT 1.6 which do not bundle "chown". --> <apply executable="chown" description="set owner on NAS files" osfamily="unix"> - <arg value="-R"/> - <arg value="${install.user}.${install.group}"/> - <dirset dir="${NAS}"/> + <arg value="-R" /> + <arg value="${install.user}.${install.group}" /> + <dirset dir="${NAS}" /> </apply> <!-- @todo check the installed configuration file (after parameter substitution). --> <!-- @todo also check the installed jini configuration files. --> -<java classname="com.bigdata.jini.util.CheckConfiguration" - failonerror="true" fork="true" logerror="true"> +<java classname="com.bigdata.jini.util.CheckConfiguration" failonerror="true" fork="true" logerror="true"> <classpath refid="install.classpath" /> <arg value="${bigdata.config}" /> </java> @@ -655,92 +590,89 @@ target platform and diagnose errors related to a missing or incomplete sysstat install or other monitoring dependencies. 
--> <target name="test-monitoring" depends="compile" description="Run the statistics collectors for the deployment platform."> - <java classname="com.bigdata.counters.AbstractStatisticsCollector" - failonerror="true" fork="true" logerror="true"> - <classpath refid="install.classpath" /> - <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}"/> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dlog4j.configuration=file:bigdata/src/resources/logging/log4j.properties"/> - <arg value="1" /><!-- interval between reports. --> - <arg value="10" /><!-- #of seconds to run. --> - </java> +<java classname="com.bigdata.counters.AbstractStatisticsCollector" failonerror="true" fork="true" logerror="true"> + <classpath refid="install.classpath" /> + <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}" /> + <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> + <jvmarg value="-Dlog4j.configuration=file:bigdata/src/resources/logging/log4j.properties" /> + <arg value="1" /> + <!-- interval between reports. --> + <arg value="10" /> + <!-- #of seconds to run. --> +</java> </target> - + <!-- Note: we must fork the JVM to the jvmarg overrides applied. --> <!-- Note: We disable registration of log4j MBeans since that requires policy file. --> <!-- @todo add a target to launch the post-mortem counter set/events viewer. --> <target name="analysis" depends="bundleJar" description="Extracts performance counters from logged XML files."> - <java classname="com.bigdata.counters.query.CounterSetQuery" - failonerror="true" fork="true" logerror="true"> - <classpath refid="runtime.classpath" /> - <jvmarg value="-Xmx1500m" /> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dlog4j.configuration=file:bigdata/src/resources/logging/log4j.properties"/> - <arg value="-outputDir" /> - <arg value="${analysis.out.dir}" /> - <arg value="-mimeType" /> - <arg value="text/plain" /> - <arg value="-queries" /> - <arg file="${analysis.queries}" /> - <arg file="${analysis.counters.dir}" /> - </java> - +<java classname="com.bigdata.counters.query.CounterSetQuery" failonerror="true" fork="true" logerror="true"> + <classpath refid="runtime.classpath" /> + <jvmarg value="-Xmx1500m" /> + <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> + <jvmarg value="-Dlog4j.configuration=file:bigdata/src/resources/logging/log4j.properties" /> + <arg value="-outputDir" /> + <arg value="${analysis.out.dir}" /> + <arg value="-mimeType" /> + <arg value="text/plain" /> + <arg value="-queries" /> + <arg file="${analysis.queries}" /> + <arg file="${analysis.counters.dir}" /> +</java> + </target> - <!-- --> - <!-- LUBM TARGETS (OPTIONAL) --> - <!-- --> - +<!-- --> +<!-- LUBM TARGETS (OPTIONAL) --> +<!-- --> + <target name="lubm-clean" description="Clean the lubm-integration from the build directory."> - <delete dir="${build.dir}/lubm" /> +<delete dir="${build.dir}/lubm" /> </target> <target name="lubm-prepare" description="Clean the lubm-integration from the build directory."> - <mkdir dir="${build.dir}/lubm" /> - <mkdir dir="${build.dir}/lubm/classes" /> - <mkdir dir="${build.dir}/lubm/lib" /> +<mkdir dir="${build.dir}/lubm" /> +<mkdir dir="${build.dir}/lubm/classes" /> +<mkdir dir="${build.dir}/lubm/lib" /> </target> <path id="lubm.build.classpath" description="The lubm build-time classpath (this expects to find the bigdata JAR already installed)."> - <fileset dir="${install.lib.dir}"> - <include name="**/*.jar" /> - </fileset> +<fileset 
dir="${install.lib.dir}"> + <include name="**/*.jar" /> +</fileset> </path> <!-- And now for something totally weird. If you compile against the bigdata.jar rather than build.dir/classes then you will see some errors reported in LubmGeneratorMaster.java which otherwise are not reported... --> <target name="lubm-compile" depends="lubm-prepare" description="Compile the optional lubm integration."> - <javac destdir="${build.dir}/lubm/classes" classpathref="runtime.classpath" - debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" - encoding="${javac.encoding}" - > - <!-- note: must also specify -bootclasspath and -extdirs when cross-compiling --> - <!-- target="${javac.target}" source="${javac.source}" --> - <src path="${bigdata.dir}/bigdata-lubm/src/java" /> - </javac> - <!-- copy resources. --> - <copy toDir="${build.dir}/lubm/classes"> - <fileset dir="${bigdata.dir}/bigdata-lubm/src/java"> - <exclude name="**/*.java" /> - </fileset> - </copy> +<javac destdir="${build.dir}/lubm/classes" classpathref="runtime.classpath" debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" encoding="${javac.encoding}"> + <!-- note: must also specify -bootclasspath and -extdirs when cross-compiling --> + <!-- target="${javac.target}" source="${javac.source}" --> + <src path="${bigdata.dir}/bigdata-lubm/src/java" /> +</javac> +<!-- copy resources. --> +<copy toDir="${build.dir}/lubm/classes"> + <fileset dir="${bigdata.dir}/bigdata-lubm/src/java"> + <exclude name="**/*.java" /> + </fileset> +</copy> </target> <target name="lubm-jar" depends="lubm-compile" description="Generates the JAR containing the optional LUBM integration."> - <jar destfile="${build.dir}/lubm/lib/bigdata-lubm.jar"> - <fileset dir="${build.dir}/lubm/classes" /> - </jar> +<jar destfile="${build.dir}/lubm/lib/bigdata-lubm.jar"> + <fileset dir="${build.dir}/lubm/classes" /> +</jar> </target> <!-- This explicitly enumerates the lubm scripts so we don't run fixcrlf or set the execute bit on arbitrary files in the install directory. --> <fileset dir="${install.bin.dir}" id="lubm-scripts" description="The lubm scripts."> - <include name="lubmMaster.sh" /> - <include name="lubmQuery.sh" /> - <include name="lubmGen.sh" /> +<include name="lubmMaster.sh" /> +<include name="lubmQuery.sh" /> +<include name="lubmGen.sh" /> </fileset> <!-- While this installs the LUBM integration into the same place as the @@ -751,287 +683,270 @@ a model for how to install and run your own software against a bigdata federation that is already up and running. --> <target name="lubm-install" depends="lubm-jar" description="Install the optional lubm integration which may be used for benchmarking the RDF database."> - <mkdir dir="${install.lubm.dir}"/> - <mkdir dir="${install.lubm.lib.dir}"/> - <mkdir dir="${install.lubm.config.dir}"/> - <!-- install JAR. --> - <copy toDir="${install.lubm.lib.dir}" file="${build.dir}/lubm/lib/bigdata-lubm.jar"/> - <!-- install ontology, configuration files, and query files. --> - <copy toDir="${install.lubm.config.dir}"> - <fileset dir="bigdata-lubm/resources/config"/> - </copy> - <!-- install scripts. --> - <copy toDir="${install.bin.dir}"> - <fileset dir="bigdata-lubm/resources/scripts"/> - </copy> - <!-- replace will only find those @XXX@ parameters which have not yet been +<mkdir dir="${install.lubm.dir}" /> +<mkdir dir="${install.lubm.lib.dir}" /> +<mkdir dir="${install.lubm.config.dir}" /> +<!-- install JAR. 
--> +<copy toDir="${install.lubm.lib.dir}" file="${build.dir}/lubm/lib/bigdata-lubm.jar" /> +<!-- install ontology, configuration files, and query files. --> +<copy toDir="${install.lubm.config.dir}"> + <fileset dir="bigdata-lubm/resources/config" /> +</copy> +<!-- install scripts. --> +<copy toDir="${install.bin.dir}"> + <fileset dir="bigdata-lubm/resources/scripts" /> +</copy> +<!-- replace will only find those @XXX@ parameters which have not yet been transcribed out by the bigdata ant install. --> - <replace dir="${install.bin.dir}" summary="true"> - <replacefilter token="@NAS@" value="${NAS}" /> - <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> - <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> - <replacefilter token="@LUBM_CLASS_SERVER_PORT@" value="${LUBM_CLASS_SERVER_PORT}" /> - <replacefilter token="@LUBM_CLASS_SERVER_HOSTNAME@" value="${LUBM_CLASS_SERVER_HOSTNAME}" /> - <replacefilter token="@LUBM_RMI_CODEBASE_URL@" value="${LUBM_RMI_CODEBASE_URL}" /> - <replacefilter token="@install.lubm.lib.dir@" value="${install.lubm.lib.dir}" /> - <replacefilter token="@install.lubm.config.dir@" value="${install.lubm.config.dir}" /> - </replace> - <replace dir="${install.lubm.config.dir}" summary="true"> - <replacefilter token="@NAS@" value="${NAS}" /> - <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> - <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> - <replacefilter token="@LUBM_CLASS_SERVER_PORT@" value="${LUBM_CLASS_SERVER_PORT}" /> - <replacefilter token="@LUBM_CLASS_SERVER_HOSTNAME@" value="${LUBM_CLASS_SERVER_HOSTNAME}" /> - <replacefilter token="@LUBM_RMI_CODEBASE_URL@" value="${LUBM_RMI_CODEBASE_URL}" /> - <replacefilter token="@install.lubm.lib.dir@" value="${install.lubm.lib.dir}" /> - <replacefilter token="@install.lubm.config.dir@" value="${install.lubm.config.dir}" /> - </replace> - <!-- fix newlines (otherwise substitutions cause things to break). --> - <fixcrlf srcDir="${install.bin.dir}" > - <!-- file set not supported. <fileset refid="scripts" /> --> - </fixcrlf> - <!-- set execute bit for scripts in this directory (must be the last step). --> - <chmod perm="u+x,g+rx,o-rwx"> - <fileset refid="lubm-scripts" /> - </chmod> - <!-- Make sure that it is all accessible to the install group (ant 1.6+ plus extension module required). 
+<replace dir="${install.bin.dir}" summary="true"> + <replacefilter token="@NAS@" value="${NAS}" /> + <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> + <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> + <replacefilter token="@LUBM_CLASS_SERVER_PORT@" value="${LUBM_CLASS_SERVER_PORT}" /> + <replacefilter token="@LUBM_CLASS_SERVER_HOSTNAME@" value="${LUBM_CLASS_SERVER_HOSTNAME}" /> + <replacefilter token="@LUBM_RMI_CODEBASE_URL@" value="${LUBM_RMI_CODEBASE_URL}" /> + <replacefilter token="@install.lubm.lib.dir@" value="${install.lubm.lib.dir}" /> + <replacefilter token="@install.lubm.config.dir@" value="${install.lubm.config.dir}" /> +</replace> +<replace dir="${install.lubm.config.dir}" summary="true"> + <replacefilter token="@NAS@" value="${NAS}" /> + <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> + <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> + <replacefilter token="@LUBM_CLASS_SERVER_PORT@" value="${LUBM_CLASS_SERVER_PORT}" /> + <replacefilter token="@LUBM_CLASS_SERVER_HOSTNAME@" value="${LUBM_CLASS_SERVER_HOSTNAME}" /> + <replacefilter token="@LUBM_RMI_CODEBASE_URL@" value="${LUBM_RMI_CODEBASE_URL}" /> + <replacefilter token="@install.lubm.lib.dir@" value="${install.lubm.lib.dir}" /> + <replacefilter token="@install.lubm.config.dir@" value="${install.lubm.config.dir}" /> +</replace> +<!-- fix newlines (otherwise substitutions cause things to break). --> +<fixcrlf srcDir="${install.bin.dir}"> + <!-- file set not supported. <fileset refid="scripts" /> --> +</fixcrlf> +<!-- set execute bit for scripts in this directory (must be the last step). --> +<chmod perm="u+x,g+rx,o-rwx"> + <fileset refid="lubm-scripts" /> +</chmod> +<!-- Make sure that it is all accessible to the install group (ant 1.6+ plus extension module required). <chown file="${NAS}" type="both" owner="${install.user}.${install.group}" verbose="true"/> --> - <!-- Works for earlier versions of ant LT 1.6 which do not bundle "chown". --> - <apply executable="chown" description="set owner on NAS files" osfamily="unix"> - <arg value="-R"/> - <arg value="${install.user}.${install.group}"/> - <dirset dir="${install.bin.dir}"/> - </apply> - <apply executable="chown" description="set owner on NAS files" osfamily="unix"> - <arg value="-R"/> - <arg value="${install.user}.${install.group}"/> - <dirset dir="${install.lubm.dir}"/> - </apply> +<!-- Works for earlier versions of ant LT 1.6 which do not bundle "chown". --> +<apply executable="chown" description="set owner on NAS files" osfamily="unix"> + <arg value="-R" /> + <arg value="${install.user}.${install.group}" /> + <dirset dir="${install.bin.dir}" /> +</apply> +<apply executable="chown" description="set owner on NAS files" osfamily="unix"> + <arg value="-R" /> + <arg value="${install.user}.${install.group}" /> + <dirset dir="${install.lubm.dir}" /> +</apply> </target> <!-- lubm runtime classpath w/o install. 
--> <path id="lubm.runtime.classpath"> - <pathelement location="${build.dir}/lubm/classes"/> - <pathelement location="${build.dir}/classes" /> - <path refid="build.classpath" /> +<pathelement location="${build.dir}/lubm/classes" /> +<pathelement location="${build.dir}/classes" /> +<path refid="build.classpath" /> </path> <target name="lubm-load" depends="jar, lubm-compile" description="Load data into a configured lubm test harness, typically standalone."> - <java classname="edu.lehigh.swat.bench.ubt.Test" - failonerror="true" fork="true" logerror="true"> - <classpath refid="lubm.runtime.classpath"/> - <jvmarg value="-server" /> - <jvmarg value="-Xmx1024m" /> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dlog4j.configuration=file:bigdata/src/resources/logging/log4j.properties"/> - <arg value="load" /> - <arg value="bigdata-lubm/src/java/edu/lehigh/swat/bench/ubt/bigdata/config.kb.bigdata" /> - </java> - +<java classname="edu.lehigh.swat.bench.ubt.Test" failonerror="true" fork="true" logerror="true"> + <classpath refid="lubm.runtime.classpath" /> + <jvmarg value="-server" /> + <jvmarg value="-Xmx1024m" /> + <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> + <jvmarg value="-Dlog4j.configuration=file:bigdata/src/resources/logging/log4j.properties" /> + <arg value="load" /> + <arg value="bigdata-lubm/src/java/edu/lehigh/swat/bench/ubt/bigdata/config.kb.bigdata" /> +</java> + </target> - + <target name="lubm-test" depends="jar, lubm-compile" description="Run queries against a configured lubm test harness, typically standalone."> - <java classname="edu.lehigh.swat.bench.ubt.Test" - failonerror="true" fork="true" logerror="true"> - <classpath refid="lubm.runtime.classpath" /> - <jvmarg value="-server" /> - <jvmarg value="-Xmx1024m" /> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dlog4j.configuration=file:bigdata/src/resources/logging/log4j.properties"/> - <arg value="query" /> - <arg value="bigdata-lubm/src/java/edu/lehigh/swat/bench/ubt/bigdata/config.kb.bigdata" /> - <arg value="bigdata-lubm/src/java/edu/lehigh/swat/bench/ubt/bigdata/config.query.sparql" /> - </java> - +<java classname="edu.lehigh.swat.bench.ubt.Test" failonerror="true" fork="true" logerror="true"> + <classpath refid="lubm.runtime.classpath" /> + <jvmarg value="-server" /> + <jvmarg value="-Xmx1024m" /> + <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> + <jvmarg value="-Dlog4j.configuration=file:bigdata/src/resources/logging/log4j.properties" /> + <arg value="query" /> + <arg value="bigdata-lubm/src/java/edu/lehigh/swat/bench/ubt/bigdata/config.kb.bigdata" /> + <arg value="bigdata-lubm/src/java/edu/lehigh/swat/bench/ubt/bigdata/config.query.sparql" /> +</java> + </target> - <!-- --> - <!-- STANDALONE FEDERATION TARGETS --> - <!-- (test/benchamarking) --> +<!-- --> +<!-- STANDALONE FEDERATION TARGETS --> +<!-- (test/benchamarking) --> <target name="generateLookupStarterJar" unless="lookupStarterJarAvailable"> - <antcall target="testCompile" /> +<antcall target="testCompile" /> </target> <target name="testLookupStarterJarAvailability"> - <property name="bigdata-test.lib" location="${bigdata.dir}/bigdata-test/lib" /> - <condition property="lookupStarterJarAvailable"> - <available file="${bigdata-test.lib}/lookupstarter.jar"/> - </condition> +<property name="bigdata-test.lib" location="${bigdata.dir}/bigdata-test/lib" /> +<condition property="lookupStarterJarAvailable"> + <available file="${bigdata-test.lib}/lookupstarter.jar" /> +</condition> </target> 
<target name="standalone-setup" depends="testLookupStarterJarAvailability,generateLookupStarterJar" description="Setup properties used by standalone federation and LUS start/stop."> - <property name="app.home" location="${bigdata.dir}" /> - <property name="test.codebase.port" value="23333"/> - <property name="test.codebase.dir" location="${bigdata.dir}/bigdata-jini/lib/jini/lib-dl"/> - <property name="dist.lib" location="${bigdata.dir}/bigdata-jini/lib/jini/lib" /> - <property name="dist.lib.dl" location="${bigdata.dir}/bigdata-jini/lib/jini/lib-dl" /> - <property name="test.codebase" value="http://${this.hostname}:${test.codebase.port}/jsk-dl.jar"/> - <property name="java.security.policy" value="${bigdata.dir}/policy.all"/> - <property name="log4j.configuration" value="resources/logging/log4j.properties"/> - <property name="java.net.preferIPv4Stack" value="true"/> - <property name="bigdata.fedname" value="${standalone.fed}"/> +<property name="app.home" location="${bigdata.dir}" /> +<property name="test.codebase.port" value="23333" /> +<property name="test.codebase.dir" location="${bigdata.dir}/bigdata-jini/lib/jini/lib-dl" /> +<property name="dist.lib" location="${bigdata.dir}/bigdata-jini/lib/jini/lib" /> +<property name="dist.lib.dl" location="${bigdata.dir}/bigdata-jini/lib/jini/lib-dl" /> +<property name="test.codebase" value="http://${this.hostname}:${test.codebase.port}/jsk-dl.jar" /> +<property name="java.security.policy" value="${bigdata.dir}/policy.all" /> +<property name="log4j.configuration" value="resources/logging/log4j.properties" /> +<property name="java.net.preferIPv4Stack" value="true" /> +<property name="bigdata.fedname" value="${standalone.fed}" /> </target> <!-- Note: You should 'nohup' this, e.g., "nohup ant standalone-start" to avoid taking down the ServicesManagerServer if you are disconnected from a terminal. --> <target name="standalone-start" depends="jar,standalone-setup" description="Start the standalone federation."> - <!-- Start the lookup service. --> - <antcall target="startHttpd" /> - <antcall target="startLookup" /> - <java classname="com.bigdata.jini.start.ServicesManagerServer" - failonerror="true" fork="true" logerror="true"> - <classpath refid="runtime.classpath" /> - <jvmarg value="-Xmx200m" /> - <jvmarg value="-showversion"/> - <!-- The name of the federation instance. --> - <jvmarg value="-Dbigdata.fedname=${standalone.fed}"/> - <jvmarg value="-Djava.security.policy=policy.all"/> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}" /> - <jvmarg value="-Dlog4j.configuration=file:src/resources/config/standalone/log4j.properties"/> - <arg value="src/resources/config/standalone/bigdataStandalone.config" /> - </java> +<!-- Start the lookup service. --> +<antcall target="startHttpd" /> +<antcall target="startLookup" /> +<java classname="com.bigdata.jini.start.ServicesManagerServer" failonerror="true" fork="true" logerror="true"> + <classpath refid="runtime.classpath" /> + <jvmarg value="-Xmx200m" /> + <jvmarg value="-showversion" /> + <!-- The name of the federation instance. 
--> + <jvmarg value="-Dbigdata.fedname=${standalone.fed}" /> + <jvmarg value="-Djava.security.policy=policy.all" /> + <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> + <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}" /> + <jvmarg value="-Dlog4j.configuration=file:src/resources/config/standalone/log4j.properties" /> + <arg value="src/resources/config/standalone/bigdataStandalone.config" /> +</java> </target> <target name="standalone-stop" depends="jar,standalone-setup" description="Stop the standalone federation."> - <java classname="com.bigdata.service.jini.util.ShutdownFederation" - failonerror="true" fork="true" logerror="true"> - <classpath refid="runtime.classpath" /> - <jvmarg value="-Xmx200m" /> - <jvmarg value="-showversion"/> - <!-- The name of the federation instance. --> - <jvmarg value="-Dbigdata.fedname=${standalone.fed}"/> - <jvmarg value="-Djava.security.policy=policy.all"/> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}" /> - <jvmarg value="-Dlog4j.configuration=file:src/resources/config/standalone/log4j.properties"/> - <arg value="src/resources/config/standalone/bigdataStandalone.config" /> - </java> - <!-- Then take down the lookup service as well. --> - <antcall target="stopLookup" /> - <antcall target="stopHttpd" /> +<java classname="com.bigdata.service.jini.util.ShutdownFederation" failonerror="true" fork="true" logerror="true"> + <classpath refid="runtime.classpath" /> + <jvmarg value="-Xmx200m" /> + <jvmarg value="-showversion" /> + <!-- The name of the federation instance. --> + <jvmarg value="-Dbigdata.fedname=${standalone.fed}" /> + <jvmarg value="-Djava.security.policy=policy.all" /> + <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> + <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}" /> + <jvmarg value="-Dlog4j.configuration=file:src/resources/config/standalone/log4j.properties" /> + <arg value="src/resources/config/standalone/bigdataStandalone.config" /> +</java> +<!-- Then take down the lookup service as well. 
--> +<antcall target="stopLookup" /> +<antcall target="stopHttpd" /> </target> <target name="standalone-start-nano-server" depends="jar" description="Start a small http server fronting for a bigdata database instance."> - <java classname="com.bigdata.rdf.sail.bench.NanoSparqlServer" fork="true" failonerror="true"> - <arg line="${standalone.nanoServerPort} ${standalone.namespace} src/resources/config/standalone/bigdataStandalone.config" /> - <jvmarg line="-server"/> - <jvmarg line="-Xmx200M"/> - <classpath refid="runtime.classpath" /> - </java> +<java classname="com.bigdata.rdf.sail.bench.NanoSparqlServer" fork="true" failonerror="true"> + <arg line="${standalone.nanoServerPort} ${standalone.namespace} src/resources/config/standalone/bigdataStandalone.config" /> + <jvmarg line="-server" /> + <jvmarg line="-Xmx200M" /> + <classpath refid="runtime.classpath" /> +</java> </target> <target name="standalone-stop-nano-server" depends="jar" description="Stop the small http server running at the configured port."> - <java classname="com.bigdata.rdf.sail.bench.NanoSparqlServer" fork="true" failonerror="true"> - <arg line="${standalone.nanoServerPort} -stop" /> - <classpath refid="runtime.classpath" /> - </java> +<java classname="com.bigdata.rdf.sail.bench.NanoSparqlServer" fork="true" failonerror="true"> + <arg line="${standalone.nanoServerPort} -stop" /> + <classpath refid="runtime.classpath" /> +</java> </target> <target name="standalone-bulk-load" depends="jar" description="Bulk load RDF data into the standalone federation."> - <java classname="com.bigdata.rdf.load.MappedRDFDataLoadMaster" - failonerror="true" fork="true" logerror="true"> - <classpath refid="runtime.classpath" /> - <jvmarg value="-Xmx200m" /> - <jvmarg value="-showversion"/> - <!-- The name of the federation instance. --> - <jvmarg value="-Dbigdata.fedname=${standalone.fed}"/> - <jvmarg value="-Djava.security.policy=policy.all"/> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}" /> - <jvmarg value="-Dlog4j.configuration=file:src/resources/config/standalone/log4j.properties"/> - <!-- --> - <!-- Per job parameters --> - <!-- --> - <!-- The namespace of the target KB. --> - <jvmarg value="-Dbigdata.rdf.namespace=${standalone.namespace}"/> - <!-- The job name (same as the KB namespace is a common default). --> - <jvmarg value="-Dbigdata.rdf.job.name=bulk-load-kb-${standalone-namespace}"/> - <!-- The file or directory containing zero or more files to be loaded first. --> - <jvmarg value="-Dbigdata.rdf.ontology=${standalone.bulkLoad.ontology}"/> - <!-- The file or directory containing RDF data to be loaded. --> - <jvmarg value="-Dbigdata.rdf.data=${standalone.bulkLoad.data}"/> - <!-- The main configuration file. --> - <arg value="src/resources/config/standalone/bigdataStandalone.config" /> - </java> +<java classname="com.bigdata.rdf.load.MappedRDFDataLoadMaster" failonerror="true" fork="true" logerror="true"> + <classpath refid="runtime.classpath" /> + <jvmarg value="-Xmx200m" /> + <jvmarg value="-showversion" /> + <!-- The name of the federation instance. 
--> + <jvmarg value="-Dbigdata.fedname=${standalone.fed}" /> + <jvmarg value="-Djava.security.policy=policy.all" /> + <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> + <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}" /> + <jvmarg value="-Dlog4j.configuration=file:src/resources/config/standalone/log4j.properties" /> + <!-- --> + <!-- Per job parameters --> + <!-- --> + <!-- The namespace of the target KB. --> + <jvmarg value="-Dbigdata.rdf.namespace=${standalone.namespace}" /> + <!-- The job name (same as the KB namespace is a common default). --> + <jvmarg value="-Dbigdata.rdf.job.name=bulk-load-kb-${standalone-namespace}" /> + <!-- The file or directory containing zero or more files to be loaded first. --> + <jvmarg value="-Dbigdata.rdf.ontology=${standalone.bulkLoad.ontology}" /> + <!-- The file or directory containing RDF data to be loaded. --> + <jvmarg value="-Dbigdata.rdf.data=${standalone.bulkLoad.data}" /> + <!-- The main configuration file. --> + <arg value="src/resources/config/standalone/bigdataStandalone.config" /> +</java> </target> - <!-- --> - <!-- MISC. UTILITY TARGETS --> - <!-- --> - +<!-- --> +<!-- MISC. UTILITY TARGETS --> +<!-- --> + <target name="scale-out-sample" description="Run the scale-out sample code."> - <javac destdir="${build.dir}/classes" classpathref="build.classpath" - debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" - encoding="${javac.encoding}" - > - <src path="${bigdata.dir}/bigdata-sails/src/samples" /> - </javac> - <!-- copy resources. --> - <copy toDir="${build.dir}/classes"> - <fileset dir="${bigdata.dir}/bigdata-sails/src/samples"> - <exclude name="**/*.java" /> - <exclude name="**/package.html" /> - </fileset> - </copy> - <java classname="com.bigdata.samples.ScaleOut" - failonerror="true" fork="true" logerror="true"> - <classpath refid="runtime.classpath" /> - <jvmarg value="-Xmx1500m" /> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dlog4j.configuration=file:bigdata-sails/src/samples/com/bigdata/samples/log4j.properties"/> - <arg value="${bigdata.config}" /> - </java> - +<javac destdir="${build.dir}/classes" classpathref="build.classpath" debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" encoding="${javac.encoding}"> + <src path="${bigdata.dir}/bigdata-sails/src/samples" /> +</javac> +<!-- copy resources. --> +<copy toDir="${build.dir}/classes"> + <fileset dir="${bigdata.dir}/bigdata-sails/src/samples"> + <exclude name="**/*.java" /> + <exclude name="**/package.html" /> + </fileset> +</copy> +<java classname="com.bigdata.samples.ScaleOut" failonerror="true" fork="true" logerror="true"> + <classpath refid="runtime.classpath" /> + <jvmarg value="-Xmx1500m" /> + <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> + <jvmarg value="-Dlog4j.configuration=file:bigdata-sails/src/samples/com/bigdata/samples/log4j.properties" /> + <arg value="${bigdata.config}" /> +</java> + </target> <target name="DataLoader" depends="compile" description="Loads RDF data into a local KB. You MUST edit this ant target before running it."> - <java classname="com.bigdata.rdf.store.DataLoader" - fork="true" - failonerror="true" - > - <!-- usage: [-namespace namespace] propertyFile (fileOrDir)+ --> - <!-- Where: --> - <!-- [-namespace namespace] is the KB namespace (default is 'kb'). 
--> - <!-- propertyFile is a properties file identifying the Journal and +<java classname="com.bigdata.rdf.store.DataLoader" fork="true" failonerror="true"> + <!-- usage: [-namespace namespace] propertyFile (fileOrDir)+ --> + <!-- Where: --> + <!-- [-namespace namespace] is the KB namespace (default is 'kb'). --> + <!-- propertyFile is a properties file identifying the Journal and giving various Journal and/or kb configuration properties if one or the other needs to be created. --> - <!-- (fileOrDir)+ is a list of one or more RDF files or directories to + <!-- (fileOrDir)+ is a list of one or more RDF files or directories to be loaded. zip and gz extensions are recognized, but only one file is loaded per archive. --> - <arg line="custom.properties fileOrDir"/> - <jvmarg value="-server"/> - <!-- Specify the maximum Java heap size. --> - <jvmarg value="-Xmx10g"/> - <!-- optionally enable yourkit profiler. + <arg line="custom.properties fileOrDir" /> + <jvmarg value="-server" /> + <!-- Specify the maximum Java heap size. --> + <jvmarg value="-Xmx10g" /> + <!-- optionally enable yourkit profiler. <jvmarg value="-DLD_LIBRARY_PATH=/nas/install/yjp-8.0.19/bin/linux-x86-64"/> <jvmarg value="-agentpath:/nas/install/yjp-8.0.20/bin/linux-x86-64/libyjpagent.so"/> <jvmarg value="-agentlib:yjpagent=disableexceptiontelemetry,disablestacktelemetry"/> --> - <jvmarg value="-XX:+UseParallelOldGC"/> - <!-- Optional enable GC trace. + <jvmarg value="-XX:+UseParallelOldGC" /> + <!-- Optional enable GC trace. <jvmarg line="-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:jvm_gc.log"/> --> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> + <classpath> + <path refid="runtime.classpath" /> + </classpath> +</java> </target> -<target name="set-properties" depends="compile" - description="Set or change properties for a kb instance. You MUST edit this target to specify the name of the journal. The new values are read from stdin."> - <java classname="com.bigdata.rdf.sail.BigdataSailHelper" - fork="true" - failonerror="true" - inputstring="com.bigdata.relation.rule.eval.DefaultRuleTaskFactory.nestedSubquery=false" - > -<!-- Various things you might want to change: +<target name="set-properties" depends="compile" description="Set or change properties for a kb instance. You MUST edit this target to specify the name of the journal. The new values are read from stdin."> +<java classname="com.bigdata.rdf.sail.BigdataSailHelper" fork="true" failonerror="true" inputstring="com.bigdata.relation.rule.eval.DefaultRuleTaskFactory.nestedSubquery=false"> + <!-- Various things you might want to change: Maximum #of threads for joins. inputstring="com.bigdata.relation.rule.eval.ProgramTask.maxParallelSubqueries=5" @@ -1040,530 +955,466 @@ inputstring="com.bigdata.relation.rule.eval.DefaultRuleTaskFactory.nestedSubquery=true" --> - <arg line="d:/LTS.U50.jnl LTS kb"/> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> + <arg line="d:/LTS.U50.jnl LTS kb" /> + <classpath> + <path refid="runtime.classpath" /> + </classpath> +</java> </target> - <!-- --> - <!-- STAGING --> - <!-- --> - <target name="stage" - description="stages resources (jar, config, policy, logging files) needed to package or execute the bigdata distribution." - depends="jar"> +<!-- --> +<!-- STAGING --> +<!-- --> +<target name="stage" description="stages resources (jar, config, policy, logging files) needed to package or execute the bigdata distribution." 
depends="jar"> - <!-- Create staging directories --> - <property name="dist.dir" location="${bigdata.dir}/dist/bigdata" /> +<!-- Create staging directories --> +<property name="dist.dir" location="${bigdata.dir}/dist/bigdata" /> - <property name="dist.bin" location="${dist.dir}/bin" /> - <property name="dist.bin.config" location="${dist.bin}/config" /> - <property name="dist.lib" location="${dist.dir}/lib" /> - <property name="dist.lib.dl" location="${dist.dir}/lib-dl" /> - <property name="dist.lib.ext" location="${dist.dir}/lib-ext" /> - <property name="dist.var" location="${dist.dir}/var" /> +<property name="dist.bin" location="${dist.dir}/bin" /> +<property name="dist.bin.config" location="${dist.bin}/config" /> +<property name="dist.lib" location="${dist.dir}/lib" /> +<property name="dist.lib.dl" location="${dist.dir}/lib-dl" /> +<property name="dist.lib.ext" location="${dist.dir}/lib-ext" /> +<property name="dist.var" location="${dist.dir}/var" /> - <property name="dist.var.config" location="${dist.var}/config" /> - <property name="dist.var.config.policy" location="${dist.var.config}/policy" /> - <property name="dist.var.config.logging" location="${dist.var.config}/logging" /> - <property name="dist.var.config.jini" location="${dist.var.config}/jini" /> +<property name="dist.var.config" location="${dist.var}/config" /> +<property name="dist.var.config.policy" location="${dist.var.config}/policy" /> +<property name="dist.var.config.logging" location="${dist.var.config}/logging" /> +<property name="dist.var.config.jini" location="${dist.var.config}/jini" /> - <delete dir="${dist.dir}" quiet="true"/> - <mkdir dir="${dist.dir}"/> - <mkdir dir="${dist.bin}"/> - <mkdir dir="${dist.lib}"/> - <mkdir dir="${dist.lib.dl}"/> - <mkdir dir="${dist.lib.ext}"/> - <mkdir dir="${dist.var}"/> - <mkdir dir="${dist.var.config}"/> - <mkdir dir="${dist.var.config.policy}"/> - <mkdir dir="${dist.var.config.logging}"/> - <mkdir dir="${dist.var.config.jini}"/> +<delete dir="${dist.dir}" quiet="true" /> +<mkdir dir="${dist.dir}" /> +<mkdir dir="${dist.bin}" /> +<mkdir dir="${dist.lib}" /> +<mkdir dir="${dist.lib.dl}" /> +<mkdir dir="${dist.lib.ext}" /> +<mkdir dir="${dist.var}" /> +<mkdir dir="${dist.var.config}" /> +<mkdir dir="${dist.var.config.policy}" /> +<mkdir dir="${dist.var.config.logging}" /> +<mkdir dir="${dist.var.config.jini}" /> - <!-- Copy the jar files created by the jar target to --> - <!-- an application-specific but non-version-specific --> - <!-- jar file to either the lib or lib-dl staging --> - <!-- directory. When a new version of a given application's --> - <!-- jar file becomes available, the version-specific jar --> - <!-- file name should be changed here. --> +<!-- Copy the jar files created by the jar target to --> +<!-- an application-specific but non-version-specific --> +<!-- jar file to either the lib or lib-dl staging --> +<!-- directory. When a new version of a given application's --> +<!-- jar file becomes available, the version-specific jar --> +<!-- file name should be changed here. 
--> - <property name="bigdata.lib" location="${bigdata.dir}/bigdata/lib" /> - <property name="bigdata-jini.dir" location="${bigdata.dir}/bigdata-jini" /> - <property name="bigdata-jini.lib" location="${bigdata.dir}/bigdata-jini/lib/jini/lib" /> - <property name="bigdata-rdf.lib" location="${bigdata.dir}/bigdata-rdf/lib" /> - <property name="bigdata-sails.lib" location="${bigdata.dir}/bigdata-sails/lib" /> - <property name="bigdata-zookeeper.lib" location="${bigdata.dir}/bigdata-jini/lib/apache" /> +<property name="bigdata.lib" location="${bigdata.dir}/bigdata/lib" /> +<property name="bigdata-jini.dir" location="${bigdata.dir}/bigdata-jini" /> +<property name="bigdata-jini.lib" location="${bigdata.dir}/bigdata-jini/lib/jini/lib" /> +<property name="bigdata-rdf.lib" location="${bigdata.dir}/bigdata-rdf/lib" /> +<property name="bigdata-sails.lib" location="${bigdata.dir}/bigdata-sails/lib" /> +<property name="bigdata-zookeeper.lib" location="${bigdata.dir}/bigdata-jini/lib/apache" /> - <!-- Utility libraries --> +<!-- Utility libraries --> - <copy file="${bigdata.lib}/unimi/colt-1.2.0.jar" - tofile="${dist.lib}/colt.jar"/> - <copy file="${bigdata.lib}/ctc_utils-5-4-2005.jar" - tofile="${dist.lib}/ctc_utils.jar"/> - <copy file="${bigdata.lib}/cweb-commons-1.1-b2-dev.jar" - tofile="${dist.lib}/cweb-commons.jar"/> - <copy file="${bigdata.lib}/cweb-extser-0.1-b2-dev.jar" - tofile="${dist.lib}/cweb-extser.jar"/> - <copy file="${bigdata.lib}/high-scale-lib-v1.1.2.jar" - tofile="${dist.lib}/highscalelib.jar"/> - <copy file="${bigdata.lib}/dsi-utils-1.0.6-020610.jar" - tofile="${dist.lib}/dsiutils.jar"/> - <copy file="${bigdata.lib}/lgpl-utils-1.0.6-020610.jar" - tofile="${dist.lib}/lgplutils.jar"/> - <copy file="${bigdata.lib}/unimi/fastutil-5.1.5.jar" - tofile="${dist.lib}/fastutil.jar"/> - <copy file="${bigdata.lib}/icu/icu4j-3_6.jar" - tofile="${dist.lib}/icu4j.jar"/> - <copy file="${bigdata.lib}/apache/log4j-1.2.15.jar" - tofile="${dist.lib}/log4j.jar"/> - <copy file="${bigdata.lib}/lucene/lucene-analyzers-3.0.0.jar" - tofile="${dist.lib}/lucene-analyzer.jar"/> - <copy file="${bigdata.lib}/lucene/lucene-core-3.0.0.jar" - tofile="${dist.lib}/lucene-core.jar"/> +<copy file="${bigdata.lib}/unimi/colt-1.2.0.jar" tofile="${dist.lib}/colt.jar" /> +<copy file="${bigdata.lib}/ctc_utils-5-4-2005.jar" tofile="${dist.lib}/ctc_utils.jar" /> +<copy file="${bigdata.lib}/cweb-commons-1.1-b2-dev.jar" tofile="${dist.lib}/cweb-commons.jar" /> +<copy file="${bigdata.lib}/cweb-extser-0.1-b2-dev.jar" tofile="${dist.lib}/cweb-extser.jar" /> +<copy file="${bigdata.lib}/high-scale-lib-v1.1.2.jar" tofile="${dist.lib}/highscalelib.jar" /> +<copy file="${bigdata.lib}/dsi-utils-1.0.6-020610.jar" tofile="${dist.lib}/dsiutils.jar" /> +<copy file="${bigdata.lib}/lgpl-utils-1.0.6-020610.jar" tofile="${dist.lib}/lgplutils.jar" /> +<copy file="${bigdata.lib}/unimi/fastutil-5.1.5.jar" tofile="${dist.lib}/fastutil.jar" /> +<copy file="${bigdata.lib}/icu/icu4j-3_6.jar" tofile="${dist.lib}/icu4j.jar" /> +<copy file="${bigdata.lib}/apache/log4j-1.2.15.jar" tofile="${dist.lib}/log4j.jar" /> +<copy file="${bigdata.lib}/lucene/lucene-analyzers-3.0.0.jar" tofile="${dist.lib}/lucene-analyzer.jar" /> +<copy file="${bigdata.lib}/lucene/lucene-core-3.0.0.jar" tofile="${dist.lib}/lucene-core.jar" /> - <!-- RDF library --> +<!-- RDF library --> - <copy file="${bigdata-rdf.lib}/iris-0.58.jar" - tofile="${dist.lib}/iris.jar"/> - <copy file="${bigdata-rdf.lib}/openrdf-sesame-2.3.0-onejar.jar" - tofile="${dist.lib}/openrdf-sesame.jar"/> - <copy 
file="${bigdata-rdf.lib}/slf4j-api-1.4.3.jar" - tofile="${dist.lib}/slf4j.jar"/> - <copy file="${bigdata-rdf.lib}/slf4j-log4j12-1.4.3.jar" - tofile="${dist.lib}/slf4j-log4j.jar"/> +<copy file="${bigdata-rdf.lib}/iris-0.58.jar" tofile="${dist.lib}/iris.jar" /> +<copy file="${bigdata-rdf.lib}/jgrapht-jdk1.5-0.7.1.jar" tofile="${dist.lib}/jgrapht.jar" /> +<copy file="${bigdata-rdf.lib}/openrdf-sesame-2.3.0-onejar.jar" tofile="${dist.lib}/openrdf-sesame.jar" /> +<copy file="${bigdata-rdf.lib}/slf4j-api-1.4.3.jar" tofile="${dist.lib}/slf4j.jar" /> +<copy file="${bigdata-rdf.lib}/slf4j-log4j12-1.4.3.jar" tofile="${dist.lib}/slf4j-log4j.jar" /> - <!-- NxParser (RDF NQuads support) --> - <copy file="${bigdata-rdf.lib}/nxparser-6-22-2010.jar" - tofile="${dist.lib}/nxparser.jar"/> - - <!-- Zookeeper library --> - <copy file="${bigdata-zookeeper.lib}/zookeeper-3.2.1.jar" - tofile="${dist.lib}/zookeeper.jar"/> +<!-- NxParser (RDF NQuads support) --> +<copy file="${bigdata-rdf.lib}/nxparser-6-22-2010.jar" tofile="${dist.lib}/nxparser.jar" /> - <!-- Jini library --> +<!-- Zookeeper library --> +<copy file="${bigdata-zookeeper.lib}/zookeeper-3.2.1.jar" tofile="${dist.lib}/zookeeper.jar" /> - <copy file="${bigdata-jini.lib}/browser.jar" - todir="${dist.lib}"/> - <copy file="${bigdata-jini.lib}/classserver.jar" - todir="${dist.lib}"/> - <copy file="${bigdata-jini.lib}/jsk-lib.jar" - todir="${dist.lib}"/> - <copy file="${bigdata-jini.lib}/jsk-platform.jar" - todir="${dist.lib}"/> - <copy file="${bigdata-jini.lib}/jsk-resources.jar" - todir="${dist.lib}"/> - <copy file="${bigdat... [truncated message content] |
From: <tho...@us...> - 2010-08-06 15:46:16
|
Revision: 3423 http://bigdata.svn.sourceforge.net/bigdata/?rev=3423&view=rev Author: thompsonbry Date: 2010-08-06 15:46:07 +0000 (Fri, 06 Aug 2010) Log Message: ----------- Rationalized the bigdata cluster configuration files slightly and added one for a single node cluster (bigdataStandalone.config). Modified build.xml to remove all the "standalone" targets and to stage the new bigdataStandalone.config file. Modified Paths: -------------- trunk/build.xml trunk/src/resources/config/README trunk/src/resources/config/bigdataCluster.config trunk/src/resources/config/bigdataCluster16.config Added Paths: ----------- trunk/src/resources/config/bigdataStandalone.config Modified: trunk/build.xml =================================================================== --- trunk/build.xml 2010-08-06 15:14:23 UTC (rev 3422) +++ trunk/build.xml 2010-08-06 15:46:07 UTC (rev 3423) @@ -777,118 +777,8 @@ </java> </target> - + <!-- --> -<!-- STANDALONE FEDERATION TARGETS --> -<!-- (test/benchamarking) --> - -<target name="generateLookupStarterJar" unless="lookupStarterJarAvailable"> -<antcall target="testCompile" /> -</target> - -<target name="testLookupStarterJarAvailability"> -<property name="bigdata-test.lib" location="${bigdata.dir}/bigdata-test/lib" /> -<condition property="lookupStarterJarAvailable"> - <available file="${bigdata-test.lib}/lookupstarter.jar" /> -</condition> -</target> - -<target name="standalone-setup" depends="testLookupStarterJarAvailability,generateLookupStarterJar" description="Setup properties used by standalone federation and LUS start/stop."> -<property name="app.home" location="${bigdata.dir}" /> -<property name="test.codebase.port" value="23333" /> -<property name="test.codebase.dir" location="${bigdata.dir}/bigdata-jini/lib/jini/lib-dl" /> -<property name="dist.lib" location="${bigdata.dir}/bigdata-jini/lib/jini/lib" /> -<property name="dist.lib.dl" location="${bigdata.dir}/bigdata-jini/lib/jini/lib-dl" /> -<property name="test.codebase" value="http://${this.hostname}:${test.codebase.port}/jsk-dl.jar" /> -<property name="java.security.policy" value="${bigdata.dir}/policy.all" /> -<property name="log4j.configuration" value="resources/logging/log4j.properties" /> -<property name="java.net.preferIPv4Stack" value="true" /> -<property name="bigdata.fedname" value="${standalone.fed}" /> -</target> - -<!-- Note: You should 'nohup' this, e.g., "nohup ant standalone-start" to - avoid taking down the ServicesManagerServer if you are disconnected - from a terminal. --> -<target name="standalone-start" depends="jar,standalone-setup" description="Start the standalone federation."> -<!-- Start the lookup service. --> -<antcall target="startHttpd" /> -<antcall target="startLookup" /> -<java classname="com.bigdata.jini.start.ServicesManagerServer" failonerror="true" fork="true" logerror="true"> - <classpath refid="runtime.classpath" /> - <jvmarg value="-Xmx200m" /> - <jvmarg value="-showversion" /> - <!-- The name of the federation instance. 
--> - <jvmarg value="-Dbigdata.fedname=${standalone.fed}" /> - <jvmarg value="-Djava.security.policy=policy.all" /> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}" /> - <jvmarg value="-Dlog4j.configuration=file:src/resources/config/standalone/log4j.properties" /> - <arg value="src/resources/config/standalone/bigdataStandalone.config" /> -</java> -</target> - -<target name="standalone-stop" depends="jar,standalone-setup" description="Stop the standalone federation."> -<java classname="com.bigdata.service.jini.util.ShutdownFederation" failonerror="true" fork="true" logerror="true"> - <classpath refid="runtime.classpath" /> - <jvmarg value="-Xmx200m" /> - <jvmarg value="-showversion" /> - <!-- The name of the federation instance. --> - <jvmarg value="-Dbigdata.fedname=${standalone.fed}" /> - <jvmarg value="-Djava.security.policy=policy.all" /> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}" /> - <jvmarg value="-Dlog4j.configuration=file:src/resources/config/standalone/log4j.properties" /> - <arg value="src/resources/config/standalone/bigdataStandalone.config" /> -</java> -<!-- Then take down the lookup service as well. --> -<antcall target="stopLookup" /> -<antcall target="stopHttpd" /> -</target> - -<target name="standalone-start-nano-server" depends="jar" description="Start a small http server fronting for a bigdata database instance."> -<java classname="com.bigdata.rdf.sail.bench.NanoSparqlServer" fork="true" failonerror="true"> - <arg line="${standalone.nanoServerPort} ${standalone.namespace} src/resources/config/standalone/bigdataStandalone.config" /> - <jvmarg line="-server" /> - <jvmarg line="-Xmx200M" /> - <classpath refid="runtime.classpath" /> -</java> -</target> - -<target name="standalone-stop-nano-server" depends="jar" description="Stop the small http server running at the configured port."> -<java classname="com.bigdata.rdf.sail.bench.NanoSparqlServer" fork="true" failonerror="true"> - <arg line="${standalone.nanoServerPort} -stop" /> - <classpath refid="runtime.classpath" /> -</java> -</target> - -<target name="standalone-bulk-load" depends="jar" description="Bulk load RDF data into the standalone federation."> -<java classname="com.bigdata.rdf.load.MappedRDFDataLoadMaster" failonerror="true" fork="true" logerror="true"> - <classpath refid="runtime.classpath" /> - <jvmarg value="-Xmx200m" /> - <jvmarg value="-showversion" /> - <!-- The name of the federation instance. --> - <jvmarg value="-Dbigdata.fedname=${standalone.fed}" /> - <jvmarg value="-Djava.security.policy=policy.all" /> - <jvmarg value="-Dcom.bigdata.jmx.log4j.disable=true" /> - <jvmarg value="-Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME}" /> - <jvmarg value="-Dlog4j.configuration=file:src/resources/config/standalone/log4j.properties" /> - <!-- --> - <!-- Per job parameters --> - <!-- --> - <!-- The namespace of the target KB. --> - <jvmarg value="-Dbigdata.rdf.namespace=${standalone.namespace}" /> - <!-- The job name (same as the KB namespace is a common default). --> - <jvmarg value="-Dbigdata.rdf.job.name=bulk-load-kb-${standalone-namespace}" /> - <!-- The file or directory containing zero or more files to be loaded first. --> - <jvmarg value="-Dbigdata.rdf.ontology=${standalone.bulkLoad.ontology}" /> - <!-- The file or directory containing RDF data to be loaded. 
--> - <jvmarg value="-Dbigdata.rdf.data=${standalone.bulkLoad.data}" /> - <!-- The main configuration file. --> - <arg value="src/resources/config/standalone/bigdataStandalone.config" /> -</java> -</target> - -<!-- --> <!-- MISC. UTILITY TARGETS --> <!-- --> @@ -1122,9 +1012,9 @@ <!-- Stage the bigdata Jini config files --> +<copy file="${src.resources.config}/bigdataStandalone.config" todir="${dist.var.config.jini}" /> <copy file="${src.resources.config}/bigdataCluster.config" todir="${dist.var.config.jini}" /> <copy file="${src.resources.config}/bigdataCluster16.config" todir="${dist.var.config.jini}" /> -<copy file="${src.resources.config}/standalone/bigdataStandalone.config" todir="${dist.var.config.jini}" /> <!-- Stage the infrastructure service config files --> Modified: trunk/src/resources/config/README =================================================================== --- trunk/src/resources/config/README 2010-08-06 15:14:23 UTC (rev 3422) +++ trunk/src/resources/config/README 2010-08-06 15:46:07 UTC (rev 3423) @@ -3,8 +3,10 @@ bigdataStandalone.config - A sample configuration file for a workstation. -bigdataCluster.config - A sample configuration file for a cluster. +bigdataCluster.config - A sample configuration file for a 3-node cluster. +bigdataCluster.config - A sample configuration file for a 16-node cluster. + log4j.properties - A default log4j configuration file for use by the bigdata clients and services. Modified: trunk/src/resources/config/bigdataCluster.config =================================================================== --- trunk/src/resources/config/bigdataCluster.config 2010-08-06 15:14:23 UTC (rev 3422) +++ trunk/src/resources/config/bigdataCluster.config 2010-08-06 15:46:07 UTC (rev 3423) @@ -1200,15 +1200,6 @@ static private awaitDataServicesTimeout = 8000; /* Multiplier for the scatter effect. - * - * Note: TERM2ID tends to grow more slowly than the other indices for two - * reasons. First, there are many more distinct RDF Statements than RDF - * Values for nearly any data set (except if statement identifiers are enabled, - * in which case there are more terms than statements). Second, the keys of - * the TERM2ID index compress nicely since long prefixes are very common. - * Therefore it makes sense to use a smaller scatter factor for this index - * UNLESS you have only 2-3 data services, in which case you will see hot - * spots develop with this index unless it is more widely distributed. */ static private scatterFactor = 2; static private scatterFactor_term2id = 2; // use 1 @ 4DS and up. Modified: trunk/src/resources/config/bigdataCluster16.config =================================================================== --- trunk/src/resources/config/bigdataCluster16.config 2010-08-06 15:14:23 UTC (rev 3422) +++ trunk/src/resources/config/bigdataCluster16.config 2010-08-06 15:46:07 UTC (rev 3423) @@ -1305,13 +1305,6 @@ static private awaitDataServicesTimeout = 8000; /* Multiplier for the scatter effect. - * - * Note: TERM2ID tends to grow more slowly than the other indices for two - * reasons. First, there are many more distinct RDF Statements than RDF - * Values for nearly any data set (except if statement identifiers are enabled, - * in which case there are more terms than statements). Second, the keys of - * the TERM2ID index compress nicely since long prefixes are very common. - * Therefore it makes sense to use a smaller scatter factor for this index. 
*/ static private scatterFactor = 2; static private scatterFactor_term2id = 1; Added: trunk/src/resources/config/bigdataStandalone.config =================================================================== --- trunk/src/resources/config/bigdataStandalone.config (rev 0) +++ trunk/src/resources/config/bigdataStandalone.config 2010-08-06 15:46:07 UTC (rev 3423) @@ -0,0 +1,1886 @@ +import net.jini.jeri.BasicILFactory; +import net.jini.jeri.BasicJeriExporter; +import net.jini.jeri.tcp.TcpServerEndpoint; + +import net.jini.discovery.LookupDiscovery; +import net.jini.core.discovery.LookupLocator; +import net.jini.core.entry.Entry; +import net.jini.lookup.entry.Name; +import net.jini.lookup.entry.Comment; +import net.jini.lookup.entry.Address; +import net.jini.lookup.entry.Location; +import net.jini.lookup.entry.ServiceInfo; +import net.jini.core.lookup.ServiceTemplate; + +import java.io.File; + +import com.bigdata.util.NV; +import com.bigdata.journal.BufferMode; +import com.bigdata.jini.lookup.entry.*; +import com.bigdata.service.IBigdataClient; +import com.bigdata.service.jini.*; +import com.bigdata.service.jini.lookup.DataServiceFilter; +import com.bigdata.service.jini.master.ServicesTemplate; +import com.bigdata.jini.start.config.*; +import com.bigdata.jini.util.ConfigMath; + +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Id; + +// imports for various options. +import com.bigdata.btree.IndexMetadata; +import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.spo.SPORelation; +import com.bigdata.rdf.spo.SPOKeyOrder; +import com.bigdata.rdf.lexicon.LexiconRelation; +import com.bigdata.rdf.lexicon.LexiconKeyOrder; +import com.bigdata.rawstore.Bytes; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeUnit.*; + +/* + * This is a sample configuration file for a bigdata federation. + * + * Note: The original file is a template. The template contains parameters + * of the form @XXX@. The values for those template parameters are specified + * in the build.properties file when you use ant to install bigdata. + * + * Note: This file uses the jini configuration mechanism. The syntax + * is a subset of Java. The properties for each component are grouped + * within the namespace for that component. + * + * See the net.jini.config.ConfigurationFile javadoc for more + * information. + */ + +/* + * A namespace use for static entries referenced elsewhere in this + * ConfigurationFile. + */ +bigdata { + + /** + * The name for this federation. + * + * Note: This is used to form the [zroot] (root node in zookeeper + * for the federation) and the [serviceDir] (path in the file + * system for persistent state for the federation). + * + * Note: If you will be running more than one federation, then you + * MUST use unicast discovery and specify the federation name in + * the [groups]. + */ + static private fedname = "@FED@"; + + /** + * Where to put all the persistent state. + */ + static private serviceDir = new File("@LAS@"); + + /** + * Which JDK to use. + */ + static private javaHome = new File("@JAVA_HOME@"); + + /** + * A common point to set the Zookeeper client's requested + * sessionTimeout and the jini lease timeout. The default lease + * renewal period for jini is 5 minutes while for zookeeper it is + * more like 5 seconds. 
This puts the two systems onto a similar + * timeout period so that a disconnected client is more likely to + * be noticed in roughly the same period of time for either + * system. A value larger than the zookeeper default helps to + * prevent client disconnects under sustained heavy load. + */ + // jini + static private leaseTimeout = ConfigMath.m2ms(60);// 20s=20000; 5m=300000; + // zookeeper + static private sessionTimeout = (int)ConfigMath.m2ms(10);// was 5m 20s=20000; 5m=300000; + + /* + * Example cluster configuration. + * + * Data services are load balanced. Index partitions will be + * moved around as necessary to ensure hosts running data + * service(s) are neither under nor over utilized. Data services + * can be very resource intensive processes. They heavily buffer + * both reads and writes, and they use RAM to do so. They also + * support high concurrency and can use up to one thread per index + * partition. How many cores they will consume is very much a + * function of the application. + * + * Zookeeper services use a quorum model. Always allocate an odd + * number. 3 gives you one failure. 5 gives you two failures. + * Zookeeper will sync the disk almost continuously while it is + * running. It really deserves its own local disk. Zookeeper + * also runs in memory. Since all operations are serialized, if + * it starts swapping then peformance will drop through the floor. + * + * Jini uses a peer model. Each service registers with each + * registrar that it discovers. Each client listeners to each + * registrar that it discovers. The default jini core services + * installation runs entirely in memory (no disk operations, at + * least not for service registration). A second instance of the + * jini core services provides a safety net. If you are using + * multicast then you can always add another instance. + */ + + /* Declare the hosts. This provides indirection for planning + * purposes. + * + * The summary notation is: cores@GHZ/cache x RAM x DISK + */ + static private h0 = "192.168.1.50"; // 4@3ghz/1kb x 4GB x 263G + //static private h1 = "192.168.20.27"; // 4@3ghz/2kb x 4GB x 263G + //static private h2 = "192.168.20.28"; // 4@3ghz/1kb x 4GB x 64G + + /* Note: this configuration puts things that are not disk intensive + * on the host with the least disk space and zookeeper. + */ + static private lbs = h0; // specify as @LOAD_BALANCER_HOST@ ? + static private txs = h0; + static private mds = h0; + + // 1+ jini servers + static private jini1 = h0; + //static private jini2 = h1; + static private jini = new String[]{ jini1 }; //,jini2}; + + // Either 1 or 3 zookeeper machines (one instance per). + // See the QuorumPeerMain and ZooKeeper configurations below. + static private zoo1 = h0; + //static private zoo2 = h1; + //static private zoo3 = h2; + static private zoo = new String[] { zoo1 }; // ,zoo2,zoo3}; + + // 1+ client service machines (1+ instance per host). + static private cs0 = h0; + + // 1+ data service machines (1+ instance per host). + static private ds0 = h0; + static private ds1 = h1; + + // client servers + static private cs = new String[] { + cs0 //, ... + }; + + // The target #of client servers. + static private clientServiceCount = 1; + static private maxClientServicePerHost = 1; + + // data servers + static private ds = new String[]{ + ds0//, ds1 //, ... + }; + + // The target #of data services. + static private dataServiceCount = 1; + + // Maximum #of data services per host. 
+ static private maxDataServicesPerHost = 1; + + // @todo also specify k (replicationCount) + + // Sets the initial and maximum journal extents. + static private journalExtent = ConfigMath.multiply(200, Bytes.megabyte); + + /** + * A String[] whose values are the group(s) to be used for discovery + * (no default). Note that multicast discovery is always used if + * LookupDiscovery.ALL_GROUPS (a <code>null</code>) is specified. + */ + + // one federation, multicast discovery. + //static private groups = LookupDiscovery.ALL_GROUPS; + + // unicast discovery or multiple federations, MUST specify groups. + static private groups = new String[]{bigdata.fedname}; + + /** + * One or more unicast URIs of the form <code>jini://host/</code> + * or <code>jini://host:port/</code> (no default). + * + * This MAY be an empty array if you want to use multicast + * discovery <strong>and</strong> you have specified the groups as + * LookupDiscovery.ALL_GROUPS (a <code>null</code>). + */ + static private locators = new LookupLocator[] { + + // runs jini on the localhost using unicast locators. + //new LookupLocator("jini://localhost/") + + // runs jini on two hosts using unicast locators. + new LookupLocator("jini://"+jini1), + //new LookupLocator("jini://"+jini2), + + }; + + /** + * The policy file that will be used to start services. + */ + private static policy = "@POLICY_FILE@"; + + /** + * log4j configuration file (applies to bigdata and zookeeper). + * + * Note: The value is URI! + * + * Note: You should aggregate all of the log output to a single + * host. For example, using the log4j SocketAppender and the + * SimpleNodeServer. + */ + log4j = "@LOG4J_CONFIG@"; + + /** + * java.util.logging configuration file (applies to jini as used + * within bigdata). + * + * Note: The value is a file path! + */ + logging = "@LOGGING_CONFIG@"; + + /* + private static host = ConfigUtil.getHostName(); + private static port = "8081"; + private static jskdl = " http://" + host + ":" + port + "/jsk-dl.jar"; + */ + + /** + * JVM argument may be used to enable the yourkit profiler agent on a + * service. Of course, yourkit must be installed at this location and + * you must have a licensed copy of the yourkit UI running either on a + * node of the cluster or on a machine routed to the cluster, e.g., via + * an ssh tunnel. The yourkit profiler uses ports in [10001:100010] by + * default on each node. + * + * See http://www.yourkit.com/docs/80/help/running_with_profiler.jsp + * + * See http://www.yourkit.com/docs/80/help/agent.jsp + * + * See http://www.yourkit.com/docs/80/help/additional_agent_options.jsp + * + * Note: Conditionally include ${profilerAgent} iff you want to enable + * profiling for some service class. + */ + + // linux-64 with all profiling options initially disabled. + profilerAgent="-agentpath:/usr/java/yjp-9.0.3/bin/linux-x86-64/libyjpagent.so=disableexceptiontelemetry,disablestacktelemetry"; + +} + +/* + * Service configuration defaults. These can also be specified on a + * per service-type basis. When the property is an array type, the + * value here is concatenated with the optional array value on the per + * service-type configuration. Otherwise it is used iff no value is + * specified for the service-type configuration. + */ +com.bigdata.jini.start.config.ServiceConfiguration { + + /* + * Default java command line arguments that will be used for all + * java-based services + * + * Note: [-Dcom.sun.jini.jeri.tcp.useNIO=true] enables NIO in + * combination with the [exporter] configured below. 
+ */ + defaultJavaArgs = new String[]{ + "-server", + "-ea", + "-showversion", + //"-Xmx2G", + /* This is a workaround for a JVM bug which can result in a + * lost wakeup. This bug is fixed in JDK1.6.0_18. However, + * JDK1.6.0_18 has other problems which result in segfaults. + * + * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6822370 + */ + "-XX:+UseMembar", + "-Dcom.sun.jini.jeri.tcp.useNIO=@USE_NIO@", + "-Djava.security.policy="+bigdata.policy, + "-Djava.util.logging.config.file="+bigdata.logging, + "-Dcom.bigdata.counters.linux.sysstat.path=@SYSSTAT_HOME@", + //bigdata.profilerAgent, + }; + + /* Default path for service instances and their persistent + * data. This may be overriden on a per service-type basis. + * + * Note: For logical services that support failover, the concrete + * service directory is assigned dynamically when a physical + * service instance is created. + */ + serviceDir = bigdata.serviceDir; + + // The JVM to use. + javaHome = bigdata.javaHome; + + /* The bigdata services default logging configuration (a URI!) + */ + log4j = bigdata.log4j; + + /* + * Set up some default properties values that will be inherited + * (copy by value) by all clients and services started using this + * configuration file. + */ + properties = new NV[] { + + /* + * Each JiniClient (and hence all bigdata services) can run an + * httpd that will expose performance counters for the service and + * the host on which it is running. This property specifies the + * port for that httpd service. Valid values are port number, + * zero (0) for a random open port, MINUS ONE (-1) to disable the + * httpd service. + */ + //new NV(IBigdataClient.Options.HTTPD_PORT, "-1"), + + /* + * Option to disable collection of performance counters for the + * host on which the client or service is running. + * + * Note: The load balancer relies on this information! + */ + //new NV(IBigdataClient.Options.COLLECT_PLATFORM_STATISTICS,"false"), + + /* Option to disable collection of performance counters on the + * queues used internally by the client or service. + * + * Note: The load balancer relies on this information! + */ + //new NV(IBigdataClient.Options.COLLECT_QUEUE_STATISTICS,"false"), + + /* Option controls how many times a client request will be + * reissued on receiving notice that an index partition locator is + * stale. Stale locators arise when an index partition is split, + * moved, or joined. + * + * Note: This option needs to be larger if we are aggressively + * driving journal overflows and index partitions splits during + * the "young" phase of a data service or scale-out index since a + * LOT of redirects will result. + */ + new NV(IBigdataClient.Options.CLIENT_MAX_STALE_LOCATOR_RETRIES,"1000"), + + }; + +} + +/** + * JoinManager options. + * + * Note: These options must be copied into the service.config (to + * specify the service lease timeout) as well as used by the client + * (which uses this file directly). + */ +net.jini.lookup.JoinManager { + + // The lease timeout for jini joins. + maxLeaseDuration = bigdata.leaseTimeout; + +} + +/** + * Jini service configuration. + */ +jini { + + /* This sets command line arguments for the ServiceStarter which + * is used to run the jini services. 
+ */ + args = new String[] { + + "-Xmx400m", + "-Djava.security.policy="+bigdata.policy, + "-Djava.util.logging.config.file="+bigdata.logging, + "-Dlog4j.configuration="+bigdata.log4j, + "-Dlog4j.primary.configuration="+bigdata.log4j, + "-DinitialMemberGroups="+bigdata.fedname + + }; + + /** + * The main jini configuration file. This file contains a + * NonActivatableServiceDescriptor[]. The elements of that array + * describe how to start each of the jini services. + */ + configFile = new File("@JINI_CONFIG@"); + + /** + * The #of instances to run. + * + * Note: A jini service instance may be started on a host if it is + * declared in [locators]. If locators is empty, then you are + * using multicast discovery. In this case an instance may be + * started on any host, unless [constraints] are imposed. In any + * case, no more than [serviceCount] jini services will be started + * at any given time. This is checked against the #of discovered + * instances. + */ + serviceCount = 1; + +} + +/** + * Zookeeper server configuration. + */ +org.apache.zookeeper.server.quorum.QuorumPeerMain { + + /* Directory for zookeeper's persistent state. The [id] will be + * appended as another path component automatically to keep + * instances separate. + */ + dataDir = new File(bigdata.serviceDir,"zookeeper"); + + /* Optional directory for the zookeeper log files. The [id] will + * be appended as another path component automatically to keep + * instances separate. + * + * Note: A dedicated local storage device is highly recommended + * for the zookeeper transaction logs! + */ + //dataLogDir=new File("/var/zookeeper-log"); + + // required. + clientPort=2181; + + tickTime=2000; + + initLimit=5; + + syncLimit=2; + + /* A comma delimited list of the known zookeeper servers together + * with their assigned "myid": {myid=host:port(:port)}+ + * + * Note: You SHOULD specify the full list of servers that are + * available to the federation. An instance of zookeeper will be + * started automatically on each host running ServicesManager that + * is present in the [servers] list IF no instance is found + * running on that host at the specified [clientPort]. + * + * Note: zookeeper interprets NO entries as the localhost with + * default peer and leader ports. This will work as long as the + * localhost is already running zookeeper. However, zookeeper + * WILL NOT automatically start zookeeper if you do not specify + * the [servers] property. You can also explicitly specify + * "localhost" as the hostname, but that only works for a single + * machine. + */ + // standalone + //servers="1=localhost:2888:3888"; + // ensemble + /**/ + servers = "1="+bigdata.zoo1+":2888:3888" +// + ",2="+bigdata.zoo2+":2888:3888" +// + ",3="+bigdata.zoo3+":2888:3888" + ; + + // This is all you need to run zookeeper. + classpath = new String[] { + "@LIB_DIR@/apache/zookeeper-3.2.1.jar", + "@LIB_DIR@/apache/log4j-1.2.15.jar" + }; + + /* Optional command line arguments for the JVM used to execute + * zookeeper. + * + * Note: swapping for zookeeper is especially bad since the + * operations are serialized, so if anything hits then disk then + * all operations in the queue will have that latency as well. + * However, bigdata places a very light load on + * zookeeper so a modest heap should be Ok. For example, I have + * observed a process size of only 94m after 10h on a 15-node + * cluster. + */ + args = new String[]{ + "-Xmx200m", + /* + * Enable JXM remote management. 
+ * + "-Dcom.sun.management.jmxremote.port=9997", + "-Dcom.sun.management.jmxremote.authenticate=false", + "-Dcom.sun.management.jmxremote.ssl=false", + */ +}; + + // zookeeper server logging configuration (value is a URI!) + log4j = bigdata.log4j; + +} + +/* + * Zookeeper client configuration. + */ +org.apache.zookeeper.ZooKeeper { + + /* Root znode for the federation instance. */ + zroot = "/"+bigdata.fedname; + + /* A comma separated list of host:port pairs, where the port is + * the CLIENT port for the zookeeper server instance. + */ + // standalone. + // servers = "localhost:2181"; + // ensemble + servers = bigdata.zoo1+":2181" // @TODO enable other instances. +// + ","+bigdata.zoo2+":2181" +// + ","+bigdata.zoo3+":2181" + ; + + /* Session timeout (optional). */ + sessionTimeout = bigdata.sessionTimeout; + + /* + * ACL for the zookeeper nodes created by the bigdata federation. + * + * Note: zookeeper ACLs are not transmitted over secure channels + * and are placed into plain text Configuration files by the + * ServicesManagerServer. + */ + acl = new ACL[] { + + new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone")) + + }; + +} + +/* + * Jini client configuration + */ +com.bigdata.service.jini.JiniClient { + + /* Default Entry[] for jini services. Also used by the + * ServicesManagerService as is. + * + * Note: A Name attribute will be added automatically using the + * service type and the znode of the service instance. That Name + * will be canonical. It is best if additional service names are + * NOT specified as that might confuse somethings :-) + * + * Note: A Hostname attribute will be added dynamically. + */ + entries = new Entry[] { + // Purely informative. + new Comment(bigdata.fedname), + }; + + groups = bigdata.groups; + + locators = bigdata.locators; + + // optional JiniClient properties. + // properties = new NV[] {}; + + /* + * Overrides for jini SERVICES (things which are started + * automatically) BUT NOT CLIENTs (things which you start by hand + * and which read this file directly). + * + * The difference here is whether or not a service.config file is + * being generated. When it is, the jiniOptions[] will be + * included in how that service is invoked and will operate as + * overrides for the parameters specified in the generated + * service.config file. However, normal clients directly consume + * this config file rather than the generated one and therefore + * you must either specify their overrides directly on the command + * line when you start the client or specify them explicitly in + * the appropriate component section within this configuration + * file. + * + * In practice, this means that you must specify some parameters + * both here and in the appropriate component configuration. E.g., + * see the component section for "net.jini.lookup.JoinManager" + * elsewhere in this file. + */ + jiniOptions = new String[] { + + // The lease timeout for jini joins. + "net.jini.lookup.JoinManager.maxLeaseDuration="+bigdata.leaseTimeout, + + }; + +} + +/** + * Options for the bigdata services manager. + */ +com.bigdata.jini.start.ServicesManagerServer { + + /* + * This object is used to export the service proxy. The choice + * here effects the protocol that will be used for communications + * between the clients and the service. + */ + exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(0), + new BasicILFactory()); + + /* + * The data directory and the file on which the serviceID will be + * written. 
+ * + * Note: These properties MUST be specified explicitly for the + * ServicesManager since it uses this as its Configuration file. + * For other services, it generates the Configuration file and + * will generate this property as well. + */ + + serviceDir = new File(bigdata.serviceDir,"ServicesManager"); + + serviceIdFile = new File(serviceDir,"service.id"); + + /* The services that will be started. For each service, there + * must be a corresponding component defined within this + * configuration file. For each "ManagedServiceConfiguration", an + * entry will be made in zookeeper and logical and physical + * service instances will be managed automatically. For unmanaged + * services, such as jini and zookeeper itself, instances will be + * started iff necessary by the services manager when it starts + * up. + */ + services = new String[] { + + "jini", + "org.apache.zookeeper.server.quorum.QuorumPeerMain", + "com.bigdata.service.jini.TransactionServer", + "com.bigdata.service.jini.MetadataServer", + "com.bigdata.service.jini.DataServer", + "com.bigdata.service.jini.LoadBalancerServer", + "com.bigdata.service.jini.ClientServer" + + }; + + /* + * Additional properties passed through to the JiniClient or the + * service. + * + * Note: The services manager is used to collect statistics from the + * OS for each host so we have performance counters for hosts which + * are only running non-bigdata services, such as jini or zookeeper. + */ + properties = new NV[]{ + + }; + + /* The services manager MUDT be run on every host so that it may + * start both bigdata and non-bigdata services (jini, zookeeper). + * This is also used to report per-host performance counters to + * the load balancer for hosts that are not running bigdata + * services. + */ + constraints = new IServiceConstraint[] { + + }; + +} + +com.bigdata.service.jini.TransactionServer { + + constraints = new IServiceConstraint[] { + + new JiniRunningConstraint(), + new ZookeeperRunningConstraint(), + + new HostAllowConstraint(bigdata.txs) + + }; + + args = new String[]{ + + // Does not need much RAM. + "-Xmx200m" + + }; + + properties = new NV[] { + + /* The #of milliseconds that the database will retain history no + * longer required to support the earliest active transaction. + * + * A value of ZERO means that only the last commit point will + * be retained. The larger the value the more history will be + * retained. You can use a really big number if you never want + * to release history and you have lots of disk space :-) + * + * Note: The most recent committed state of the database is + * NEVER released. + */ + new NV(TransactionServer.Options.MIN_RELEASE_AGE, "0"), + + }; + +} + +com.bigdata.service.jini.MetadataServer { + + constraints = new IServiceConstraint[] { + + new JiniRunningConstraint(), + new ZookeeperRunningConstraint(), + //new TXRunningConstraint(), + + new HostAllowConstraint(bigdata.mds), + + }; + + args = new String[]{ + + // Does not need much RAM. + "-Xmx200m" + + }; + + properties = new NV[]{ + + /* + * The MDS does not support overflow at this time so + * overflow MUST be disabled for this service. + */ + new NV(MetadataServer.Options.OVERFLOW_ENABLED,"false") + + }; + +} + +com.bigdata.service.jini.DataServer { + + args = new String[]{ + //bigdata.profilerAgent, + /* + * Grant lots of memory, but read on. + * + * Note: 32-bit JVMs have a 2G limit on the heap, but the practical limit + * is often much less - maybe 1400m. 64-bit JVMs can use much more RAM. 
+ * However, the heap which you grant to java DOES NOT determine the total + * process heap. I have seen 64-bit java processes using an additional + * 3-4GB of heap beyond what is specified here. So, you need to consider + * the total RAM, subtract out enough for the other processes and the OS + * buffers, divide by the #of client/data services you plan to run on that + * host (generally 1-2) and then subtract out some more space for the JVM + * itself. + * + * For example, if you have 32G RAM and a 64-bit JVM and plan to run two + * CS/DS on the host, I would recommend 10G for the Java heap. You can + * expect to see Java grab another 4G per process over time. That makes + * the per CS/DS heap 14G. With two processes you have taken 28G leaving + * 4G for everything else. + * + * Here is another example: 4G RAM, 32-bit JVM, and 2 CS/DS per host. I + * would stick to 800m for the Java heap. You don't have a problem unless + * you see an OOM (OutOfMemoryException) or a process killed because GC is + * taking too much time. + * + * See http://www.ibm.com/developerworks/linux/library/j-nativememory-linux/index.html?ca=dgr-lnxw07Linux-JVM&S_TACT=105AGX59&S_CMP=grlnxw07 + * + * Note: for linux, "sysctl -w vm.swappiness=0" will keep the RAM you do + * have for your applications! + */ + "-Xmx4g",// was 800 + /* Optionally, grab all/most of the max heap at once. This makes sense for + * DS but is less necessary for other bigdata services. + */ + "-Xms2G", // 1/2 of the max heap is a good value. + /* + * This option will keep the JVM "alive" even when it is memory starved + * but perform of a memory starved JVM is terrible. + */ + //"-XX:-UseGCOverheadLimit", + /* Configure GC for higher throughput. Together these options + * request parallel old generation collection using N threads. + * The application will be paused when this occurs, but GC will + * be faster. Hence throughput will be higher. However, be + * sure to use JDK 6u10+ (6676016 : ParallelOldGC leaks memory). + * + * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6676016 + */ + "-XX:+UseParallelOldGC", + //"-XX:ParallelGCThreads=8", + /* + * Enable JXM remote management for the data service. + * + * Note: This will not work if you have two data services on a host + * because it will assign the same port to each service. In order + * to work around that the argument would have to be specified by + * the service starter and then published in the Entry[] attributes. + * + * However, you can use ssh -X to open a tunnel with X + * forwarding and then run jconsole locally on the target host + * and bring up these data services without enabling remote + * JMX. + * + "-Dcom.sun.management.jmxremote.port=9999", + "-Dcom.sun.management.jmxremote.authenticate=false", + "-Dcom.sun.management.jmxremote.ssl=false", + */ + /* + * Override the size of the default pool of direct (native) byte + * buffers. This was done to ensure that the nodes region for + * index segments remain fully buffered as the index partitions + * approach their maximum size before a split. + */ + "-Dcom.bigdata.io.DirectBufferPool.bufferCapacity="+ + ConfigMath.multiply(Bytes.kilobyte,1250), + }; + + serviceCount = bigdata.dataServiceCount; + + // restrict where the data services can run. 
+ constraints = new IServiceConstraint[] { + + new JiniRunningConstraint(), + new ZookeeperRunningConstraint(), + //new TXRunningConstraint(), + + new HostAllowConstraint(bigdata.ds), + + new MaxDataServicesPerHostConstraint(bigdata.maxDataServicesPerHost), + + }; + + /* + * Note: the [dataDir] will be filled in when a new service + * instance is created based on the [servicesDir], so don't set it + * here yourself. + */ + properties = new NV[]{ + + new NV(DataServer.Options.BUFFER_MODE, + //""+com.bigdata.journal.BufferMode.Direct + ""+com.bigdata.journal.BufferMode.DiskWORM + ), + + /* Option disables synchronous overflow after N times and + * configures the offset bits for the journal for a scale-up + * configuration so we may use very large journals. + */ + //new NV(DataServer.Options.OVERFLOW_MAX_COUNT,"5"), + //new NV(DataServer.Options.OFFSET_BITS,""+com.bigdata.rawstore.WormAddressManager.SCALE_UP_OFFSET_BITS), + + /* Synchronous overflow is triggered when the live journal is + * this full (the value is a percentage, expressed as a + * floating point number in [0:1]). + */ + //new NV(DataServer.Options.OVERFLOW_THRESHOLD,".9"), + + /* Override the initial and maximum extent so that they are more + * more suited to large data sets. Overflow will be triggered as + * the size of the journal approaches the maximum extent. The + * initial and maximum extent are configured up above. + */ + + new NV(DataServer.Options.INITIAL_EXTENT, "" + bigdata.journalExtent), + new NV(DataServer.Options.MAXIMUM_EXTENT, "" + bigdata.journalExtent), + + /* Specify the queue capacity for the write service (unisolated + * write operations). + * + * 0 := SynchronousQueue. + * N := bounded queue of capacity N + * Integer.MAX_VALUE := unbounded queue. + * + * Note: The corePoolSize will never increase for an unbounded + * queue so the value specified for maximumPoolSize will + * essentially be ignored in this case. + * + * Note: A SynchronousQueue is a good choice here since it allows + * the #of threads to change in response to demand. The pool + * size should be unbounded when using a SynchronousQueue. + */ + new NV(DataServer.Options.WRITE_SERVICE_QUEUE_CAPACITY,"0"), // synchronous queue. + new NV(DataServer.Options.WRITE_SERVICE_CORE_POOL_SIZE,"50"), // + new NV(DataServer.Options.WRITE_SERVICE_MAXIMUM_POOL_SIZE,""+Integer.MAX_VALUE), + new NV(DataServer.Options.WRITE_SERVICE_PRESTART_ALL_CORE_THREADS,"true"), + + /* + * Options turns off overflow processing (debugging only). + * All writes will go onto the live journal, no index segments + * will be built, and indices will not be split, moved, + * joined, etc. + */ + //new NV(DataServer.Options.OVERFLOW_ENABLED,"false"), + + /* Maximum #of index partition moves per overflow. + */ + new NV(DataServer.Options.MAXIMUM_MOVES,"1"), + + /* Option controls how many index partitions may be moved onto + * any given target data service in a single overflow cycle + * and may be used to disable index partition moves (for + * debugging purposes). + */ + new NV(DataServer.Options.MAXIMUM_MOVES_PER_TARGET,"1"), + + /* The minimum CPU activity on a host before it will consider moving an + * index partition to shed some load. + * + * @todo A high threshold was chosen for the 3-node cluster since there + * are only 2 machines running data services. A "feature" in the load + * balancer allows moves between two heavily loaded hosts even when they + * are very close in their load, which is typically the case if you have + * only 2 machines running data services. 
The high threshold here is a + * workaround until the load balancer is modified to take into account + * whether or not a significant difference exists in the load between + * the source and possible target data service hosts. + */ + new NV(DataServer.Options.MOVE_PERCENT_CPU_TIME_THRESHOLD,".99"),//was .7 + + /* Option limits the #of index segments in a view before a + * compacting merge is forced. + */ + new NV(DataServer.Options.MAXIMUM_SEGMENTS_PER_VIEW,"5"), // default 6 + + /* Option limits the #of optional merges that are performed in each + * overflow cycle. + */ + new NV(DataServer.Options.MAXIMUM_OPTIONAL_MERGES_PER_OVERFLOW,"1"), + + /* Option effects how much splits are emphasized for a young + * scale-out index. If the index has fewer than this many + * partitions, then there will be a linear reduction in the + * target index partition size which will increase the likelyhood + * of an index split under heavy writes. This helps to distribute + * the index early in its life cycle. + */ + new NV(DataServer.Options.ACCELERATE_SPLIT_THRESHOLD,"20"),//20//50 + + /* Options accelerates overflow for data services have fewer than + * the threshold #of bytes under management. Acceleration is + * accomplished by reducing the maximum extent of the live journal + * linearly, but with a minimum of a 10M maximum extent. When the + * maximum extent is reduced by this option, the initial and the + * maximum extent will always be set to the same value for that + * journal. + */ + new NV(DataServer.Options.ACCELERATE_OVERFLOW_THRESHOLD, + //"0" + //""+com.bigdata.rawstore.Bytes.gigabyte + "2147483648" // 2G + ), + + // #of threads for index segment builds (default 3). + new NV(DataServer.Options.BUILD_SERVICE_CORE_POOL_SIZE,"5"), + + // #of threads for compacting merges (default 1). + new NV(DataServer.Options.MERGE_SERVICE_CORE_POOL_SIZE,"1"), + +// // Zero is full parallelism; otherwise #of threads in the pool. +// new NV(DataServer.Options.OVERFLOW_TASKS_CONCURRENT,"5"), + + /* Use Long.MAX_VALUE to always run overflow processing to + * completion (until no more data remains on the old journal). + */ + new NV(DataServer.Options.OVERFLOW_TIMEOUT,""+Long.MAX_VALUE), + + new NV(DataServer.Options.OVERFLOW_CANCELLED_WHEN_JOURNAL_FULL,"false"), + + new NV(DataServer.Options.LIVE_INDEX_CACHE_CAPACITY,"10"), // was 60 + + new NV(DataServer.Options.HISTORICAL_INDEX_CACHE_CAPACITY,"10"), // was 60 + + /* The maximum #of clean indices that will be retained on the + * hard reference queue (default 20). + */ + new NV(DataServer.Options.INDEX_CACHE_CAPACITY,"10"), // was 50 + + /* The timeout for unused index references before they are + * cleared from the hard reference queue (default is 1m). + * After this timeout the index reference is cleared from the + * queue and the index will be closed unless a hard reference + * exists to the index. + */ +// new NV(DataServer.Options.INDEX_CACHE_TIMEOUT,"1200000"), // 20m vs 1m + + /* The maximum #of clean index segments that will be retained + * on the hard reference queue (default 60). Note that ALL + * index segments are clean (they are read-only). + */ + new NV(DataServer.Options.INDEX_SEGMENT_CACHE_CAPACITY,"20"), // was 100 + + /* The timeout for unused index segment references before they + * are cleared from the hard reference queue (default is 1m). + * After this timeout the index segment reference is cleared + * from the queue and the index segment will be closed unless + * a hard reference exists to the index segment. 
+ */ +// new NV(DataServer.Options.INDEX_SEGMENT_CACHE_TIMEOUT,"60000000"), // 10m vs 1m + + /* The #of store files (journals and index segment stores) + * whose hard references will be maintained on a queue. The + * value should be slightly more than the index segment cache + * capacity since some journals also used by the views, but + * same journals are shared by all views so adding 3 is plenty.. + */ + new NV(DataServer.Options.STORE_CACHE_CAPACITY,"23"),// was 110 + +// new NV(DataServer.Options.STORE_CACHE_TIMEOUT,"1200000"),//20m vs 1m. + + }; + +} + +/** + * Configuration options for the containers used to distribute application tasks + * across a federation. + * + * @todo There should be a means to tag certain client servers for one purpose + * or another. This could be handled by subclassing, but it really should be + * declarative. + */ +com.bigdata.service.jini.ClientServer { + + args = new String[]{ + //bigdata.profilerAgent, + /* + * Grant lots of memory, but read on. + * + * Note: 32-bit JVMs have a 2G limit on the heap, but the practical limit + * is often much less - maybe 1400m. 64-bit JVMs can use much more RAM. + * However, the heap which you grant to java DOES NOT determine the total + * process heap. I have seen 64-bit java processes using an additional + * 3-4GB of heap beyond what is specified here. So, you need to consider + * the total RAM, subtract out enough for the other processes and the OS + * buffers, divide by the #of client/data services you plan to run on that + * host (generally 1-2) and then subtract out some more space for the JVM + * itself. + * + * For example, if you have 32G RAM and a 64-bit JVM and plan to run two + * CS/DS on the host, I would recommend 10G for the Java heap. You can + * expect to see Java grab another 4G per process over time. That makes + * the per CS/DS heap 14G. With two processes you have taken 28G leaving + * 4G for everything else. + * + * Here is another example: 4G RAM, 32-bit JVM, and 2 CS/DS per host. I + * would stick to 800m for the Java heap. You don't have a problem unless + * you see an OOM (OutOfMemoryException) or a process killed because GC is + * taking too much time. + * + * See http://www.ibm.com/developerworks/linux/library/j-nativememory-linux/index.html?ca=dgr-lnxw07Linux-JVM&S_TACT=105AGX59&S_CMP=grlnxw07 + * + * Note: for linux, "sysctl -w vm.swappiness=0" will keep the RAM you do + * have for your applications! + */ + "-Xmx2g", // was 800m + /* + * This option will keep the JVM "alive" even when it is memory starved + * but perform of a memory starved JVM is terrible. + */ + //"-XX:-UseGCOverheadLimit", + /* Configure GC for higher throughput. Together these options + * request parallel old generation collection using N threads. + * The application will be paused when this occurs, but GC will + * be faster. Hence throughput will be higher. + */ + "-XX:+UseParallelOldGC", + //"-XX:ParallelGCThreads=8", + /* + * Enable JXM remote management for the data service. + * + * Note: This will not work if you have two such services on a host + * because it will assign the same port to each service. In order + * to work around that the argument would have to be specified by + * the service starter and then published in the Entry[] attributes. + * + * However, you can use ssh -X to open a tunnel with X + * forwarding and then run jconsole locally on the target host + * and bring up these data services without enabling remote + * JMX. 
+ * + "-Dcom.sun.management.jmxremote.port=9996", + "-Dcom.sun.management.jmxremote.authenticate=false", + "-Dcom.sun.management.jmxremote.ssl=false", + */ + }; + + serviceCount = bigdata.clientServiceCount; + + constraints = new IServiceConstraint[] { + + new JiniRunningConstraint(), + new ZookeeperRunningConstraint(), + + new HostAllowConstraint(bigdata.cs), + + new MaxClientServicesPerHostConstraint(bigdata.maxClientServicePerHost), + + }; + + properties = new NV[] { + + }; + +} + +com.bigdata.service.jini.LoadBalancerServer { + + constraints = new IServiceConstraint[] { + + new JiniRunningConstraint(), + new ZookeeperRunningConstraint(), + + new HostAllowConstraint(bigdata.lbs) + + }; + + args = new String[]{ + /* + * FIXME The load balancer is a big piggish on long runs because it + * keeps the performance counter histories in RAM. While those histories + * are bounded, it still uses more RAM than it should. + */ + "-Xmx1G", + /* + * Enable JXM remote management for the data service. + * + * Note: This will not work if you have two data services on a host + * because it will assign the same port to each service. In order + * to work around that the argument would have to be specified by + * the service starter and then published in the Entry[] attributes. + * + "-Dcom.sun.management.jmxremote.port=9998", + "-Dcom.sun.management.jmxremote.authenticate=false", + "-Dcom.sun.management.jmxremote.ssl=false", + */ + }; + + /* + * Override some properties. + */ + properties = new NV[] { + + /* + * Each JiniClient (and hence all bigdata services) can run an + * httpd that will expose performance counters for the service and + * the host on which it is running. This property specifies the + * port for that httpd service. Valid values are port number, + * zero (0) for a random open port, MINUS ONE (-1) to disable the + * httpd service. + * + * Note: The load balancer httpd normally uses a known port so + * that it is easy to find. This is where you will find all of + * the performance counters aggregated for the entire federation, + * including their history. + */ + new NV(IBigdataClient.Options.HTTPD_PORT, "@LOAD_BALANCER_PORT@"), + + /* + * Note: The load balancer SHOULD NOT collect platform statistics + * itself since that interfers with its ability to aggregate + * statistics about the host on which it is running. Instead it + * should rely on the presence of at least one other service + * running on the same host to report those statistics to the load + * balancer. + */ + new NV(IBigdataClient.Options.COLLECT_PLATFORM_STATISTICS,"false"), + + /* + * The directory where the aggregated statistics will be logged. + * The load balancer will write snapshots of the historical + * counters into this directory. See LoadBalancerService javadoc + * for configuration options which effect how frequently it will + * log its counters and how many snapshots will be preserved. + * + * Note: You only need to specify this option if you want to put + * the files into a well known location, e.g, on a shared volume. + */ + //new NV(LoadBalancerServer.Options.LOG_DIR,"/opt2/var/log/bigdata"), + + /* Option essentially turns off the load-based decision making for + * this many minutes and substitutes a round-robin policy for + * recommending the least utilized data services. The main reason + * to this is to force the initial allocation to be distributed as + * evenly as possible across the data services in the cluster. 
+ */ + new NV(LoadBalancerServer.Options.INITIAL_ROUND_ROBIN_UPDATE_COUNT,"10"), + + }; + +} + +/** + * Configuration options for the KB instance. + */ +lubm { + + // The #of universities to generate. + // U8000 is 1.2B told triples + // U25000 is 3.4B told triples. + // U50000 is 6.7B told triples. + // U100000 is ~12B told triples. + static private univNum = 1000; + + // the KB namespace (based on the #of universities by default). + static private namespace = "U"+univNum+""; + + // minimum #of data services to run. + static private minDataServices = bigdata.dataServiceCount; + + // How long the master will wait to discover the minimum #of data + // services that you specified (ms). + static private awaitDataServicesTimeout = 8000; + + /* Multiplier for the scatter effect. + */ + static private scatterFactor = 1; + static private scatterFactor_term2id = 1; + + /* The #of index partitions to allocate on a scatter split. ZERO + * (0) means that 2 index partitions will be allocated per + * data service which partiticpates in the scatter split. + * Non-zero values directly give the #of index partitions to + * create. + */ + static private scatterSplitIndexPartitionCount = ConfigMath.multiply + ( scatterFactor, + bigdata.dataServiceCount + ); + static private scatterSplitIndexPartitionCount_term2id = ConfigMath.multiply + ( scatterFactor_term2id, + bigdata.dataServiceCount + ); + + // Use all discovered data services when scattering an index. + static private scatterSplitDataServiceCount = 0; + + /* Scatter split trigger point. The scatter split will not be + * triggered until the initial index partition has reached + * this percentage of a nominal index partition in size. + */ + static private scatterSplitPercentOfSplitThreshold = 0.5;//was .5 + + /* + * Multipliers that compensate for the consumer/producer ratio for + * the asynchronous index write API. These are empirical factors + * based on observing the ratio (chunkWritingTime/chunkWaitingTime). + * Assuming a constant chunk writing time, if the chunk size for each + * index is adjusted by its multiplier then this ratio would be 1:1. + * In practice, the chunk writing time is not a linear function of + * the chunk size, which is one reason why we prefer larger chunks + * and why the asynchronous write API is a win. + * + * Note: These factors were set relative to TERM2ID. However, when + * I reduced the scatterFactor for TERM2ID by 1/2, I doubled its + * chunk size to keep up the same throughput so it is now at 2.00 + * rather than 1.00. + */ + static private chunkSizeFactor_id2term = 1.79; + static private chunkSizeFactor_term2id = 2.00; + static private chunkSizeFactor_spo = 8.00; // was 3.89 + static private chunkSizeFactor_pos = 8.00; // was 13.37 + static private chunkSizeFactor_osp = 8.00; // was 27.35 + + /* The nominal sink chunk size. For each index, this is adjusted + * by the factor specified above. + */ +// static private sinkChunkSize = 10000; + static private sinkChunkSize = 1000; + + /* + * Specify / override some triple store properties. + * + * Note: You must reference this object in the section for the + * component which will actually create the KB instance, e.g., + * either the RDFDataLoadMaster or the LubmGeneratorMaster. + */ + static private properties = new NV[] { + + /* + * When "true", the store will perform incremental closure as + * the data are loaded. When "false", the closure will be + * computed after all data are loaded. 
(Actually, since we are + * not loading through the SAIL making this true does not + * cause incremental TM but it does disable closure, so + * "false" is what you need here). + */ + new NV(BigdataSail.Options.TRUTH_MAINTENANCE, "false" ), + + /* + * Enable rewrites of high-level queries into native rules (native JOIN + * execution). (Can be changed without re-loading the data to compare + * the performance of the Sesame query evaluation against using the + * native rules to perform query evaluation.) + */ + new NV(BigdataSail.Options.NATIVE_JOINS, "true"), + + /* + * May be used to turn off inference during query, but will + * cause ALL inferences to be filtered out when reading on the + * database. + */ + // new NV(BigdataSail.Options.INCLUDE_INFERRED, "false"), + + /* + * May be used to turn off query-time expansion of entailments such as + * (x rdf:type rdfs:Resource) and owl:sameAs even through those + * entailments were not materialized during forward closure (this + * disables the backchainer!) + */ + new NV(BigdataSail.Options.QUERY_TIME_EXPANDER, "false"), + + /* + * Option to restrict ourselves to RDFS only inference. This + * condition may be compared readily to many other stores. + * + * Note: While we can turn on some kinds of owl processing + * (e.g., TransitiveProperty, see below), we can not compute + * all the necessary entailments (only queries 11 and 13 + * benefit). + * + * Note: There are no owl:sameAs assertions in LUBM. + * + * Note: lubm query does not benefit from owl:inverseOf. + * + * Note: lubm query does benefit from owl:TransitiveProperty + * (queries 11 and 13). + * + * Note: owl:Restriction (which we can not compute) plus + * owl:TransitiveProperty is required to get all the answers + * for LUBM. + */ + new NV(BigdataSail.Options.AXIOMS_CLASS, "com.bigdata.rdf.axioms.RdfsAxioms"), + // new NV(BigdataSail.Options.AXIOMS_CLASS,"com.bigdata.rdf.axioms.NoAxioms"), + + /* + * Produce a full closure (all entailments) so that the + * backward chainer is always a NOP. Note th... [truncated message content] |
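A minimal, self-contained sketch (Java, not part of the configuration above) of the per-process heap sizing rule of thumb described in the com.bigdata.service.jini.ClientServer comments: take the host RAM, subtract an allowance for the OS and other processes, divide by the number of client/data services per host, and subtract the native (non-heap) JVM overhead observed in practice. All figures are illustrative assumptions taken from the worked example in those comments.

public class HeapSizingSketch {

    public static void main(String[] args) {

        final long gb = 1L << 30;

        final long hostRam         = 32 * gb; // total RAM on the host (assumed)
        final long osAndOther      =  4 * gb; // OS buffers + other processes (assumed)
        final int  servicesPerHost =  2;      // client/data services per host (assumed)
        final long nativeOverhead  =  4 * gb; // non-heap footprint seen for 64-bit JVMs

        // RAM available to each client/data service process.
        final long perProcess = (hostRam - osAndOther) / servicesPerHost;

        // The -Xmx value is what remains after the native overhead.
        final long javaHeap = perProcess - nativeOverhead;

        // With the figures above this prints "-Xmx10g", matching the example
        // in the ClientServer configuration comments.
        System.out.println("-Xmx" + (javaHeap / gb) + "g");
    }
}
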
From: <dm...@us...> - 2010-09-14 13:50:38
|
Revision: 3542 http://bigdata.svn.sourceforge.net/bigdata/?rev=3542&view=rev Author: dmacgbr Date: 2010-09-14 13:50:31 +0000 (Tue, 14 Sep 2010) Log Message: ----------- See trac #146. Allow specification of a default graph when running a bulk load of RDF triple data into a quad store. This is achieved by setting com.bigdata.rdf.load.MappedRDFDataLoadMaster.defaultGraph to the desired value, e.g. "http://xyz.com/data/defaultGraph", in the bigdata configuration file. This parameter has no effect when loading a triple store. Further, if not specified when loading a quad store, the systems behaviour is unaffected by this change. i.e. the graph/context co-ordinate in each quad remains null. Various of the unit tests touched by this change have been modified effectively assuming that the default graph has not been specified. Modified Paths: -------------- trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java trunk/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -272,7 +272,18 @@ // // /** {@value #DEFAULT_MAX_TRIES} */ // int DEFAULT_MAX_TRIES = 3; - + + /** + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. + */ + String DEFAULT_GRAPH = "defaultGraph" ; + + /** + * TODO Should we always enforce a real value? i.e. provide a real default + * or abort the load. + */ + String DEFAULT_DEFAULT_GRAPH = null ; } /** @@ -402,6 +413,12 @@ private transient RDFFormat rdfFormat; /** + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. + */ + public final String defaultGraph ; + + /** * Force the load of the NxParser integration class and its registration * of the NQuadsParser#nquads RDFFormat. 
* @@ -496,6 +513,8 @@ sb.append(", " + ConfigurationOptions.RDF_FORMAT + "=" + rdfFormat); + sb.append(", " + ConfigurationOptions.DEFAULT_GRAPH + "=" + defaultGraph) ; + sb.append(", " + ConfigurationOptions.FORCE_OVERFLOW_BEFORE_CLOSURE + "=" + forceOverflowBeforeClosure); @@ -601,6 +620,10 @@ } + defaultGraph = (String) config.getEntry(component, + ConfigurationOptions.DEFAULT_GRAPH, String.class, + ConfigurationOptions.DEFAULT_DEFAULT_GRAPH); + rejectedExecutionDelay = (Long) config.getEntry( component, ConfigurationOptions.REJECTED_EXECUTION_DELAY, Long.TYPE, @@ -979,6 +1002,7 @@ jobState.ontology,//file jobState.ontology.getPath(),//baseURI jobState.getRDFFormat(),// + jobState.defaultGraph, jobState.ontologyFileFilter // ); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -223,6 +223,7 @@ jobState.valuesInitialCapacity,// jobState.bnodesInitialCapacity,// jobState.getRDFFormat(), // + jobState.defaultGraph, parserOptions,// false, // deleteAfter is handled by the master! jobState.parserPoolSize, // Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -186,7 +186,7 @@ // run the parser. // @todo reuse the same underlying parser instance? - loader.loadRdf(reader, baseURL, rdfFormat, parserOptions); + loader.loadRdf(reader, baseURL, rdfFormat, null, parserOptions); success = true; Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -356,8 +356,14 @@ * The default {@link RDFFormat}. */ private final RDFFormat defaultFormat; - + /** + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. + */ + private final String defaultGraph; + + /** * Options for the {@link RDFParser}. */ private final RDFParserOptions parserOptions; @@ -1423,7 +1429,7 @@ try { // run the parser. new PresortRioLoader(buffer).loadRdf(reader, baseURL, - rdfFormat, parserOptions); + rdfFormat, defaultGraph, parserOptions); } finally { reader.close(); } @@ -1490,6 +1496,9 @@ * {@link BNode}s parsed from a single document. * @param defaultFormat * The default {@link RDFFormat} which will be assumed. + * @param defaultGraph + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. * @param parserOptions * Options for the {@link RDFParser}. 
* @param deleteAfter @@ -1529,6 +1538,7 @@ final int valuesInitialCapacity,// final int bnodesInitialCapacity, // final RDFFormat defaultFormat,// + final String defaultGraph,// final RDFParserOptions parserOptions,// final boolean deleteAfter,// final int parserPoolSize,// @@ -1566,6 +1576,8 @@ this.defaultFormat = defaultFormat; + this.defaultGraph = defaultGraph; + this.parserOptions = parserOptions; this.deleteAfter = deleteAfter; Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -37,6 +37,8 @@ import org.openrdf.rio.RDFParser; import org.openrdf.rio.Rio; +import com.bigdata.rdf.model.BigdataURI; + /** * Parses data but does not load it into the indices. * @@ -74,6 +76,8 @@ private final ValueFactory valueFactory; + protected String defaultGraph; + public BasicRioLoader(final ValueFactory valueFactory) { if (valueFactory == null) @@ -153,18 +157,20 @@ } final public void loadRdf(final InputStream is, final String baseURI, - final RDFFormat rdfFormat, final RDFParserOptions options) + final RDFFormat rdfFormat, final String defaultGraph, + final RDFParserOptions options) throws Exception { - loadRdf2(is, baseURI, rdfFormat, options); + loadRdf2(is, baseURI, rdfFormat, defaultGraph, options); } final public void loadRdf(final Reader reader, final String baseURI, - final RDFFormat rdfFormat, final RDFParserOptions options) + final RDFFormat rdfFormat, final String defaultGraph, + final RDFParserOptions options) throws Exception { - loadRdf2(reader, baseURI, rdfFormat, options); + loadRdf2(reader, baseURI, rdfFormat, defaultGraph, options); } @@ -180,7 +186,7 @@ * @throws Exception */ protected void loadRdf2(final Object source, final String baseURI, - final RDFFormat rdfFormat, final RDFParserOptions options) + final RDFFormat rdfFormat, final String defaultGraph, final RDFParserOptions options) throws Exception { if (source == null) @@ -198,6 +204,8 @@ if (log.isInfoEnabled()) log.info("format=" + rdfFormat + ", options=" + options); + this.defaultGraph = defaultGraph ; + final RDFParser parser = getParser(rdfFormat); // apply options to the parser @@ -212,7 +220,7 @@ // Note: reset so that rates are correct for each source loaded. stmtsAdded = 0; - + try { before(); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -72,12 +72,14 @@ * The base URL for those data. * @param rdfFormat * The interchange format. + * @param defaultGraph + * The default graph. * @param options * Options to be applied to the {@link RDFParser}. * @throws Exception */ public void loadRdf(Reader reader, String baseURL, RDFFormat rdfFormat, - RDFParserOptions options) throws Exception; + String defaultGraph, RDFParserOptions options) throws Exception; /** * Parse RDF data. @@ -88,11 +90,13 @@ * The base URL for those data. * @param rdfFormat * The interchange format. + * @param defaultGraph + * The default graph. * @param options * Options to be applied to the {@link RDFParser}. 
* @throws Exception */ public void loadRdf(InputStream is, String baseURI, RDFFormat rdfFormat, - RDFParserOptions options) throws Exception; + String defaultGraph, RDFParserOptions options) throws Exception; } Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -23,11 +23,14 @@ */ package com.bigdata.rdf.rio; +import org.openrdf.model.Resource; import org.openrdf.model.Statement; import org.openrdf.model.Value; import org.openrdf.rio.RDFHandler; import org.openrdf.rio.RDFHandlerException; +import com.bigdata.rdf.model.BigdataURI; + /** * Statement handler for the RIO RDF Parser that writes on a * {@link StatementBuffer}. @@ -45,6 +48,12 @@ final protected IStatementBuffer<?> buffer; /** + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. + */ + private BigdataURI defaultGraphURI = null ; + + /** * Sets up parser to load RDF. * * @param buffer @@ -58,7 +67,7 @@ this.buffer = buffer; } - + /** * bulk insert the buffered data into the store. */ @@ -87,8 +96,11 @@ public RDFHandler newRDFHandler() { + defaultGraphURI = null != defaultGraph && 4 == buffer.getDatabase ().getSPOKeyArity () + ? buffer.getDatabase ().getValueFactory ().createURI ( defaultGraph ) + : null + ; return this; - } public void handleStatement( final Statement stmt ) { @@ -98,9 +110,13 @@ log.debug(stmt); } - + + Resource graph = stmt.getContext() ; + if ( null == graph + && null != defaultGraphURI ) // only true when we know we are loading a quad store + graph = defaultGraphURI ; // buffer the write (handles overflow). 
- buffer.add( stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), stmt.getContext() ); + buffer.add( stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), graph ); stmtsAdded++; Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -640,7 +640,7 @@ final LoadStats totals = new LoadStats(); - loadData3(totals, reader, baseURL, rdfFormat, true/*endOfBatch*/); + loadData3(totals, reader, baseURL, rdfFormat, null, true/*endOfBatch*/); return totals; @@ -668,7 +668,7 @@ final LoadStats totals = new LoadStats(); - loadData3(totals, is, baseURL, rdfFormat, true/* endOfBatch */); + loadData3(totals, is, baseURL, rdfFormat, null, true/* endOfBatch */); return totals; @@ -704,7 +704,7 @@ final LoadStats totals = new LoadStats(); - loadData3(totals, is, baseURL, rdfFormat, true/*endOfBatch*/); + loadData3(totals, is, baseURL, rdfFormat, null, true/*endOfBatch*/); return totals; @@ -762,7 +762,7 @@ if(file.exists()) { loadFiles(totals, 0/* depth */, file, baseURL, - rdfFormat, filter, endOfBatch); + rdfFormat, null, filter, endOfBatch); return; @@ -789,7 +789,7 @@ try { - loadData3(totals, reader, baseURL, rdfFormat, endOfBatch); + loadData3(totals, reader, baseURL, rdfFormat, null, endOfBatch); } catch (Exception ex) { @@ -817,6 +817,9 @@ * The format of the file (optional, when not specified the * format is deduced for each file in turn using the * {@link RDFFormat} static methods). + * @param defaultGraph + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. * @param filter * A filter selecting the file names that will be loaded * (optional). When specified, the filter MUST accept directories @@ -827,7 +830,8 @@ * @throws IOException */ public LoadStats loadFiles(final File file, final String baseURI, - final RDFFormat rdfFormat, final FilenameFilter filter) + final RDFFormat rdfFormat, final String defaultGraph, + final FilenameFilter filter) throws IOException { if (file == null) @@ -835,7 +839,7 @@ final LoadStats totals = new LoadStats(); - loadFiles(totals, 0/* depth */, file, baseURI, rdfFormat, filter, true/* endOfBatch */ + loadFiles(totals, 0/* depth */, file, baseURI, rdfFormat, defaultGraph, filter, true/* endOfBatch */ ); return totals; @@ -844,7 +848,8 @@ protected void loadFiles(final LoadStats totals, final int depth, final File file, final String baseURI, final RDFFormat rdfFormat, - final FilenameFilter filter, final boolean endOfBatch) + final String defaultGraph, final FilenameFilter filter, + final boolean endOfBatch) throws IOException { if (file.isDirectory()) { @@ -864,7 +869,7 @@ // final RDFFormat fmt = RDFFormat.forFileName(f.toString(), // rdfFormat); - loadFiles(totals, depth + 1, f, baseURI, rdfFormat, filter, + loadFiles(totals, depth + 1, f, baseURI, rdfFormat, defaultGraph, filter, (depth == 0 && i < files.length ? false : endOfBatch)); } @@ -919,7 +924,7 @@ final String s = baseURI != null ? 
baseURI : file.toURI() .toString(); - loadData3(totals, reader, s, fmt, endOfBatch); + loadData3(totals, reader, s, fmt, defaultGraph, endOfBatch); return; @@ -955,7 +960,7 @@ */ protected void loadData3(final LoadStats totals, final Object source, final String baseURL, final RDFFormat rdfFormat, - final boolean endOfBatch) throws IOException { + final String defaultGraph, final boolean endOfBatch) throws IOException { final long begin = System.currentTimeMillis(); @@ -978,11 +983,10 @@ } // Setup the loader. - final PresortRioLoader loader = new PresortRioLoader(buffer); + final PresortRioLoader loader = new PresortRioLoader ( buffer ) ; // @todo review: disable auto-flush - caller will handle flush of the buffer. // loader.setFlush(false); - // add listener to log progress. loader.addRioLoaderListener( new RioLoaderListener() { @@ -1006,12 +1010,12 @@ if(source instanceof Reader) { - loader.loadRdf((Reader) source, baseURL, rdfFormat, parserOptions); + loader.loadRdf((Reader) source, baseURL, rdfFormat, defaultGraph, parserOptions); } else if (source instanceof InputStream) { loader.loadRdf((InputStream) source, baseURL, rdfFormat, - parserOptions); + defaultGraph, parserOptions); } else throw new AssertionError(); @@ -1360,7 +1364,7 @@ // rdfFormat, filter); dataLoader.loadFiles(totals, 0/* depth */, fileOrDir, baseURI, - rdfFormat, filter, true/* endOfBatch */ + rdfFormat, null, filter, true/* endOfBatch */ ); } Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -714,7 +714,7 @@ try { // run the parser. 
new MyLoader(buffer).loadRdf(reader, baseURL, - defaultRDFFormat, s.parserOptions); + defaultRDFFormat, null, s.parserOptions); } finally { reader.close(); } Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java =================================================================== --- trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -401,7 +401,7 @@ }); - loader.loadRdf((Reader) reader, baseURI, rdfFormat, options); + loader.loadRdf((Reader) reader, baseURI, rdfFormat, null, options); if (log.isInfoEnabled()) log.info("Done: " + resource); @@ -681,7 +681,7 @@ loader.loadRdf(new BufferedReader(new InputStreamReader( new FileInputStream(resource))), baseURI, rdfFormat, - options); + null, options); if(log.isInfoEnabled()) log.info("End of reparse: nerrors=" + nerrs + ", file=" Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java =================================================================== --- trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -161,6 +161,7 @@ valuesInitialCapacity,// bnodesInitialCapacity,// RDFFormat.RDFXML, // defaultFormat + null, // defaultGraph parserOptions, // parserOptions false, // deleteAfter poolSize, // parserPoolSize, Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java =================================================================== --- trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -400,6 +400,7 @@ valuesInitialCapacity,// bnodesInitialCapacity,// RDFFormat.RDFXML, // defaultFormat + null, // defaultGraph parserOptions, // false, // deleteAfter parallel?5:1, // parserPoolSize, Modified: trunk/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java =================================================================== --- trunk/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -1204,7 +1204,7 @@ try { dataLoader.loadFiles(dataDir, null/* baseURI */, - null/* rdfFormat */, filter); + null/* rdfFormat */, null, /* defaultGraph */filter); } catch (IOException ex) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
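The handleStatement() change above can be summarised by the small sketch below (plain Sesame API, not the bigdata classes): a parsed statement keeps its own context when it has one, and the configured default graph is substituted only when the context is null and a default graph was supplied for a quad store load. The resolveContext helper and the example subject/predicate/object URIs are illustrative only; the actual code resolves the default graph through the store's BigdataValueFactory and only when the SPO key arity is 4.

import org.openrdf.model.Resource;
import org.openrdf.model.Statement;
import org.openrdf.model.URI;
import org.openrdf.model.ValueFactory;
import org.openrdf.model.impl.ValueFactoryImpl;

public class DefaultGraphSketch {

    /**
     * Mirror of the fallback applied in PresortRioLoader#handleStatement():
     * use the statement's own context if present, otherwise fall back to the
     * configured default graph (which is null unless a quad store is being
     * loaded and a default graph was specified).
     */
    static Resource resolveContext(final Statement stmt, final URI defaultGraphURI) {

        final Resource graph = stmt.getContext();

        return (graph == null && defaultGraphURI != null) ? defaultGraphURI : graph;
    }

    public static void main(String[] args) {

        final ValueFactory vf = new ValueFactoryImpl();

        // The default graph named in the log message above.
        final URI defaultGraph = vf.createURI("http://xyz.com/data/defaultGraph");

        // A statement parsed from a triples format carries no context of its own.
        final Statement triple = vf.createStatement(
                vf.createURI("http://example.org/s"),
                vf.createURI("http://example.org/p"),
                vf.createURI("http://example.org/o"));

        // Prints the default graph, since the parsed triple has no context.
        System.out.println(resolveContext(triple, defaultGraph));

        // With no default graph configured the context remains null, which is
        // the unchanged behaviour described in the log message.
        System.out.println(resolveContext(triple, null));
    }
}
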
From: <tho...@us...> - 2010-11-19 18:12:56
|
Revision: 3970 http://bigdata.svn.sourceforge.net/bigdata/?rev=3970&view=rev Author: thompsonbry Date: 2010-11-19 18:12:50 +0000 (Fri, 19 Nov 2010) Log Message: ----------- Added a skeleton for tests for binary compatibility. Added Paths: ----------- trunk/bigdata-compatibility/ trunk/bigdata-compatibility/src/ trunk/bigdata-compatibility/src/test/ trunk/bigdata-compatibility/src/test/com/ trunk/bigdata-compatibility/src/test/com/bigdata/ trunk/bigdata-compatibility/src/test/com/bigdata/journal/ trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java Added: trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java =================================================================== --- trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java (rev 0) +++ trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2010-11-19 18:12:50 UTC (rev 3970) @@ -0,0 +1,276 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Nov 19, 2010 + */ +package com.bigdata.journal; + +import java.io.File; +import java.io.IOException; +import java.util.Properties; +import java.util.UUID; + +import junit.framework.TestCase2; + +import com.bigdata.Banner; +import com.bigdata.btree.IIndex; +import com.bigdata.btree.IndexMetadata; + +/** + * Test suite for binary compatibility, portability, and forward compatibility + * or automated migration of persistent stores and persistence or serialization + * capable objects across different bigdata releases. The tests in this suite + * rely on artifacts which are archived within SVN. + * + * @todo create w/ small extent and truncate (RW store does not support + * truncate). + * + * @todo test binary migration and forward compatibility. + * + * @todo stubs to create and organize artifacts,etc. + * + * @todo data driven test suite? + * + * @todo create artifact for each release, name the artifacts systematically, + * e.g., test.release.(RW|WORM).jnl or test.release.seg. Collect a list of + * the created artifacts and run each test against each of the versions of + * the artifact. + * + * @todo Force artifact file name case for file system compatibility? + * + * @todo test journal (WORM and RW), btree, index segment, row store, persistent + * data structures (checkpoints, index metadata, tuple serializers, etc.), + * RDF layer, RMI message formats, etc. + * + * @todo Specific tests for + * <p> + * Name2Addr and DefaultKeyBuilderFactory portability problem. See + * https://sourceforge.net/apps/trac/bigdata/ticket/193 + * <p> + * WORM global row store resolution problem introduced in the + * JOURNAL_HA_BRANCH. 
See + * https://sourceforge.net/apps/trac/bigdata/ticket/171#comment:5 + * <p> + * Sparse row store JDK encoding problem: + * https://sourceforge.net/apps/trac/bigdata/ticket/107 + */ +public class TestBinaryCompatibility extends TestCase2 { + + /** + * + */ + public TestBinaryCompatibility() { + } + + /** + * @param name + */ + public TestBinaryCompatibility(String name) { + super(name); + } + + /** + * @todo munge the release version into a name that is compatibility with + * the file system ("." to "_"). Store artifacts at each release? At + * each release in which an incompatibility is introduced? At each + * release in which a persistence capable data structure or change is + * introduced? + */ + static protected final File artifactDir = new File( + "bigdata-compatibility/src/resources/artifacts"); + + protected static class Version { + private final String version; + private final String revision; + public Version(String version,String revision) { + this.version = version; + this.revision = revision; + } + + /** + * The bigdata version number associated with the release. This is in + * the form <code>xx.yy.zz</code> + */ + public String getVersion() { + return version; + } + + /** + * The SVN repository revision associated with the release. This is in + * the form <code>####</code>. + */ + public String getRevision() { + return revision; + } + } + + /** + * Known release versions. + */ + protected static Version V_0_83_2 = new Version("0.83.2", "3349"); + + /** + * Tested Versions. + */ + protected Version[] versions = new Version[] { + V_0_83_2 + }; + + protected void setUp() throws Exception { + + Banner.banner(); + + super.setUp(); + + if (!artifactDir.exists()) { + + if (!artifactDir.mkdirs()) { + + throw new IOException("Could not create: " + artifactDir); + + } + + } + + for (Version version : versions) { + + final File versionDir = new File(artifactDir, version.getVersion()); + + if (!versionDir.exists()) { + + if (!versionDir.mkdirs()) { + + throw new IOException("Could not create: " + versionDir); + + } + + } + + } + + } + + protected void tearDown() throws Exception { + + super.tearDown(); + + } + + /** + * @throws Throwable + * + * @todo Each 'test' should run an instance of a class which knows how to + * create the appropriate artifacts and how to test them. + */ + public void test_WORM_compatibility_with_JOURNAL_HA_BRANCH() + throws Throwable { + + final Version version = V_0_83_2; + + final File versionDir = new File(artifactDir, version.getVersion()); + + final File artifactFile = new File(versionDir, getName() + + BufferMode.DiskWORM + Journal.Options.JNL); + + if (!artifactFile.exists()) { + + createArtifact(artifactFile); + + } + + verifyArtifact(artifactFile); + + } + + protected void createArtifact(final File artifactFile) throws Throwable { + + if (log.isInfoEnabled()) + log.info("Creating: " + artifactFile); + + final Properties properties = new Properties(); + + properties.setProperty(Journal.Options.FILE, artifactFile.toString()); + + properties.setProperty(Journal.Options.INITIAL_EXTENT, "" + + Journal.Options.minimumInitialExtent); + + final Journal journal = new Journal(properties); + + try { + + final IndexMetadata md = new IndexMetadata(UUID.randomUUID()); + + final IIndex ndx = journal.registerIndex("kb.spo.SPO", md); + + ndx.insert(1,1); + + journal.commit(); + + // reduce to minimum footprint. 
+ journal.truncate(); + + } catch (Throwable t) { + + journal.destroy(); + + throw new RuntimeException(t); + + } finally { + + if (journal.isOpen()) + journal.close(); + + } + + } + + protected void verifyArtifact(final File artifactFile) throws Throwable { + + if (log.isInfoEnabled()) + log.info("Verifying: " + artifactFile); + + final Properties properties = new Properties(); + + properties.setProperty(Journal.Options.FILE, artifactFile.toString()); + + final Journal journal = new Journal(properties); + + try { + + final IIndex ndx = journal.getIndex("kb.spo.SPO"); + + assertNotNull(ndx); + + assertEquals(1,ndx.lookup(1)); + + } finally { + + journal.close(); + + } + + } + +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
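One way to read the @todo notes in the skeleton above is the naming sketch below: one directory per release version under the artifact root, with the buffer mode and journal extension encoded in the file name (the test builds the name from getName() + BufferMode.DiskWORM + Journal.Options.JNL). The ".jnl" extension and the concrete test name are assumptions for illustration only.

import java.io.File;

public class ArtifactNamingSketch {

    public static void main(String[] args) {

        // Artifact root used by the test skeleton.
        final File artifactDir = new File("bigdata-compatibility/src/resources/artifacts");

        // Release version from the known-versions table (V_0_83_2).
        final String version = "0.83.2";

        // Test name and buffer mode as they appear in the compatibility test.
        final String testName = "test_WORM_compatibility_with_JOURNAL_HA_BRANCH";
        final String bufferMode = "DiskWORM";

        // Assumed value of the journal file extension (Journal.Options.JNL).
        final String ext = ".jnl";

        final File versionDir = new File(artifactDir, version);
        final File artifactFile = new File(versionDir, testName + bufferMode + ext);

        // e.g. bigdata-compatibility/src/resources/artifacts/0.83.2/
        //      test_WORM_compatibility_with_JOURNAL_HA_BRANCHDiskWORM.jnl
        System.out.println(artifactFile.getPath());
    }
}
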
From: <tho...@us...> - 2011-01-06 19:39:03
|
Revision: 4060 http://bigdata.svn.sourceforge.net/bigdata/?rev=4060&view=rev Author: thompsonbry Date: 2011-01-06 19:38:57 +0000 (Thu, 06 Jan 2011) Log Message: ----------- Fixed broken assert in ISPO. Added isTruthMaintenance() to BigdataSail. Modified Paths: -------------- trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java 2011-01-05 22:42:01 UTC (rev 4059) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java 2011-01-06 19:38:57 UTC (rev 4060) @@ -332,7 +332,7 @@ public static ModifiedEnum[] fromBooleans(final boolean[] b, final int n) { - assert n < b.length && n % 2 == 0; + assert n <= b.length && n % 2 == 0 : "n="+n+", b.length="+b.length; final ModifiedEnum[] m = new ModifiedEnum[n/2]; for (int i = 0; i < n; i+=2) { Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-01-05 22:42:01 UTC (rev 4059) +++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-01-06 19:38:57 UTC (rev 4060) @@ -426,6 +426,18 @@ } /** + * Return <code>true</code> if the SAIL is using automated truth + * maintenance. + * + * @see Options#TRUTH_MAINTENANCE + */ + public boolean isTruthMaintenance() { + + return truthMaintenance; + + } + + /** * Return <code>true</code> iff star joins are enabled. */ public boolean isStarJoins() { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
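The one-character fix to the assert above is a boundary condition: fromBooleans() may legitimately be called with n equal to b.length when every slot of the flag array is in use, so the guard must be "n <= b.length" rather than "n < b.length". The sketch below reproduces the decode loop with plain ints instead of ModifiedEnum values; the pair encoding used here is an illustrative assumption.

public class FromBooleansSketch {

    static int[] fromBooleans(final boolean[] b, final int n) {

        // The corrected guard: n may equal b.length when the array is full.
        assert n <= b.length && n % 2 == 0 : "n=" + n + ", b.length=" + b.length;

        final int[] m = new int[n / 2];

        for (int i = 0; i < n; i += 2) {

            // Each element is represented by a pair of flags; pack the pair
            // into a small int for the purposes of this illustration.
            m[i / 2] = (b[i] ? 2 : 0) + (b[i + 1] ? 1 : 0);
        }

        return m;
    }

    public static void main(String[] args) {

        // Four flags that fill the array exactly: with assertions enabled
        // (-ea), n == b.length would have tripped the old "n < b.length" test.
        final boolean[] b = { true, false, false, true };

        for (int x : fromBooleans(b, b.length)) {
            System.out.println(x);
        }
    }
}
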
From: <tho...@us...> - 2011-05-10 19:39:49
|
Revision: 4479 http://bigdata.svn.sourceforge.net/bigdata/?rev=4479&view=rev Author: thompsonbry Date: 2011-05-10 19:39:39 +0000 (Tue, 10 May 2011) Log Message: ----------- Merge branch to trunk [r4392:r4478]. The branch is closed. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/striterator/ChunkedArrayIterator.java trunk/bigdata/src/test/com/bigdata/util/TestAll.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/axioms/BaseAxioms.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/Justification.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/ILexiconConfiguration.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicIndexWriteProc.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/StatementEnum.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/BulkCompleteConverter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexRemover.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriteProc.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOKeyOrder.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPORelation.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOTupleSerializer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestEncodeDecodeKeys.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPO.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOKeyCoders.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOTupleSerializer.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOValueCoders.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/store/TestStatementIdentifiers.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithSids.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java trunk/build.xml Added Paths: ----------- trunk/bigdata/src/java/com/bigdata/relation/accesspath/ArrayAccessPath.java trunk/bigdata/src/java/com/bigdata/util/Bits.java trunk/bigdata/src/test/com/bigdata/util/TestBits.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/SidIV.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ModifiedEnum.java Property Changed: ---------------- trunk/ trunk/bigdata-jini/src/java/com/bigdata/attr/ trunk/bigdata-jini/src/java/com/bigdata/disco/ trunk/bigdata-jini/src/java/com/bigdata/disco/config/ trunk/bigdata-jini/src/java/com/bigdata/util/config/ trunk/bigdata-perf/ trunk/bigdata-perf/btc/ trunk/bigdata-perf/btc/src/ trunk/bigdata-perf/btc/src/resources/ trunk/bigdata-perf/btc/src/resources/logging/ trunk/bigdata-perf/lubm/lib/ trunk/bigdata-perf/lubm/src/resources/ trunk/bigdata-perf/lubm/src/resources/answers (U1)/ trunk/bigdata-perf/lubm/src/resources/config/ trunk/bigdata-perf/lubm/src/resources/logging/ trunk/bigdata-perf/lubm/src/resources/scripts/ trunk/bigdata-perf/uniprot/ 
trunk/bigdata-perf/uniprot/src/ trunk/bigdata-perf/uniprot/src/resources/ trunk/bigdata-perf/uniprot/src/resources/logging/ trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/ trunk/dsi-utils/LEGAL/ trunk/dsi-utils/lib/ trunk/dsi-utils/src/ trunk/dsi-utils/src/java/ trunk/dsi-utils/src/java/it/ trunk/dsi-utils/src/java/it/unimi/ trunk/dsi-utils/src/java/it/unimi/dsi/ trunk/dsi-utils/src/java/it/unimi/dsi/compression/ trunk/dsi-utils/src/java/it/unimi/dsi/io/ trunk/dsi-utils/src/java/it/unimi/dsi/util/ trunk/dsi-utils/src/test/ trunk/dsi-utils/src/test/it/ trunk/dsi-utils/src/test/it/unimi/ trunk/dsi-utils/src/test/it/unimi/dsi/ trunk/dsi-utils/src/test/it/unimi/dsi/io/ trunk/dsi-utils/src/test/it/unimi/dsi/util/ trunk/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ trunk/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ trunk/osgi/ Property changes on: trunk ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 + /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/SIDS_REFACTOR_FROM_TRUNK_BRANCH:4393-4478 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 Copied: trunk/bigdata/src/java/com/bigdata/relation/accesspath/ArrayAccessPath.java (from rev 4478, branches/SIDS_REFACTOR_FROM_TRUNK_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/ArrayAccessPath.java) =================================================================== --- trunk/bigdata/src/java/com/bigdata/relation/accesspath/ArrayAccessPath.java (rev 0) +++ trunk/bigdata/src/java/com/bigdata/relation/accesspath/ArrayAccessPath.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -0,0 +1,196 @@ +/* +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.relation.accesspath; + +import java.util.Collections; + +import com.bigdata.btree.IIndex; +import com.bigdata.btree.ITupleIterator; +import com.bigdata.relation.rule.IPredicate; +import com.bigdata.striterator.ChunkedArrayIterator; +import com.bigdata.striterator.ChunkedWrappedIterator; +import com.bigdata.striterator.IChunkedOrderedIterator; +import com.bigdata.striterator.IKeyOrder; + +/** + * An access path over an array of elements. 
+ */ +public class ArrayAccessPath<E> implements IAccessPath<E> { + + private final IPredicate<E> predicate; + + private final IKeyOrder<E> keyOrder; + + /** + * Array of elements + */ + private final E[] e; + + /** + * Ctor variant does not specify the {@link #getPredicate()} or the + * {@link #getKeyOrder()} and those methods will throw an + * {@link UnsupportedOperationException} if invoked. + */ + public ArrayAccessPath(final E[] e) { + + this(e, null/* predicate */, null/* keyOrder */); + + } + + /** + * Note: the {@link #getPredicate()} and {@link #getKeyOrder()} and methods + * will throw an {@link UnsupportedOperationException} if the corresponding + * argument is null. + */ + public ArrayAccessPath(final E[] e, + final IPredicate<E> predicate, final IKeyOrder<E> keyOrder) { + + this.predicate = predicate; + + this.keyOrder = keyOrder; + + this.e = e; + + } + + /** + * @throws UnsupportedOperationException + * unless the caller specified an {@link IPredicate} to the + * ctor. + */ + public IPredicate<E> getPredicate() { + + if (predicate == null) + throw new UnsupportedOperationException(); + + return predicate; + + } + + /** + * @throws UnsupportedOperationException + * unless the caller specified an {@link IKeyOrder} to the ctor. + */ + public IKeyOrder<E> getKeyOrder() { + + if (keyOrder == null) + throw new UnsupportedOperationException(); + + return keyOrder; + + } + + /** + * @throws UnsupportedOperationException + * since no index is associated with this array + */ + public IIndex getIndex() { + + throw new UnsupportedOperationException(); + + } + + /** + * Returns <code>true</code> when the array of elements is empty. + */ + public boolean isEmpty() { + + return e.length == 0; + + } + + /** + * Returns the size of the array of elements. + */ + public long rangeCount(boolean exact) { + + return e.length; + + } + + /** + * @throws UnsupportedOperationException + * since no index is associated with this array + */ + public ITupleIterator<E> rangeIterator() { + + throw new UnsupportedOperationException(); + + } + + /** + * Visits the entire array of elements. + */ + public IChunkedOrderedIterator<E> iterator() { + + if (e.length == 0) { + return new ChunkedWrappedIterator<E>( + Collections.EMPTY_LIST.iterator()); + } + + return new ChunkedArrayIterator<E>(e); + + } + + /** + * Visits the array of elements up to the specified limit. + */ + public IChunkedOrderedIterator<E> iterator(final int limit, + final int capacity) { + + return iterator(0L/* offset */, limit, capacity); + + } + + /** + * Visits the array of elements from the specified offset up to the + * specified limit. + */ + @SuppressWarnings("unchecked") + public IChunkedOrderedIterator<E> iterator(final long offset, + final long limit, final int capacity) { + + if (e.length == 0) { + return new ChunkedWrappedIterator<E>( + Collections.EMPTY_LIST.iterator()); + } + + final E[] a = (E[]) java.lang.reflect.Array.newInstance( + e[0].getClass(), (int) limit); + + System.arraycopy(e, (int) offset, a, 0, (int) limit); + + return new ChunkedArrayIterator<E>(a); + + } + + /** + * Does nothing and always returns ZERO(0). 
+ */ + public long removeAll() { + + return 0L; + + } + +} Modified: trunk/bigdata/src/java/com/bigdata/striterator/ChunkedArrayIterator.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/striterator/ChunkedArrayIterator.java 2011-05-10 19:22:45 UTC (rev 4478) +++ trunk/bigdata/src/java/com/bigdata/striterator/ChunkedArrayIterator.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -74,6 +74,18 @@ * * @param a * The array of elements. + */ + public ChunkedArrayIterator(final E[] a) { + + this(a.length, a, null); + + } + + /** + * An iterator that visits the elements in the given array. + * + * @param a + * The array of elements. * @param n * The #of entries in <i>a</i> that are valid. * @param keyOrder Copied: trunk/bigdata/src/java/com/bigdata/util/Bits.java (from rev 4478, branches/SIDS_REFACTOR_FROM_TRUNK_BRANCH/bigdata/src/java/com/bigdata/util/Bits.java) =================================================================== --- trunk/bigdata/src/java/com/bigdata/util/Bits.java (rev 0) +++ trunk/bigdata/src/java/com/bigdata/util/Bits.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -0,0 +1,136 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.util; + +import it.unimi.dsi.bits.BitVector; + +import java.nio.ByteBuffer; + + +/** + * Simple helper class to work with bits inside a byte. Useful for classes + * that have a lot of boolean properties or pointers to enums that can be + * more compactly represented as a series of bit flags. See SPO. + * + * @author mikepersonick + */ +public class Bits { + + /** + * Set a bit inside a byte. + * + * @param bits + * the original byte + * @param i + * the bit index (0 through 7) + * @param bit + * the bit value + * @return + * the new byte + */ + public static byte set(final byte bits, final int i, final boolean bit) { + + // check to see if bits[i] == bit already, if so, nothing to do + // also does range check on i + if (get(bits, i) == bit) + return bits; + + byte b = bits; + if (bit) { + b = (byte) (b | (0x1 << i)); + } else { + b = (byte) (b & ~(0x1 << i)); + } + return b; + + } + + /** + * Get a bit from inside a byte. + * + * @param bits + * the byte + * @param i + * the bit index (0 through 7) + * @return + * the bit value + */ + public static boolean get(final byte bits, final int i) { + + if (i < 0 || i > 7) { + throw new IndexOutOfBoundsException(); + } + + return (bits & (0x1 << i)) != 0; + + } + + /** + * Get a new byte, masking off all but the bits specified by m. + * + * @param bits + * the original byte + * @param m + * the bits to keep, all others will be masked + * @return + * the new byte + */ + public static byte mask(final byte bits, final int... 
m) { + + byte b = 0; + + for (int i = 0; i < m.length; i++) { + + if (m[i] < 0 || m[i] > 7) { + throw new IndexOutOfBoundsException(); + } + + b |= (0x1 << m[i]); + + } + + b &= bits; + + return b; + + } + + /** + * Useful for debugging. + * + * @param bits + * the byte + * @return + * the unsigned binary string representation + */ + public static String toString(final byte bits) { + + final byte[] d = new byte[] { bits }; + final ByteBuffer b = ByteBuffer.wrap(d); + final BitVector v = new ByteBufferBitVector(b); + return v.toString(); + + } + +} Modified: trunk/bigdata/src/test/com/bigdata/util/TestAll.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/util/TestAll.java 2011-05-10 19:22:45 UTC (rev 4478) +++ trunk/bigdata/src/test/com/bigdata/util/TestAll.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -79,6 +79,8 @@ suite.addTestSuite( TestCSVReader.class ); + suite.addTestSuite( TestBits.class ); + return suite; } Copied: trunk/bigdata/src/test/com/bigdata/util/TestBits.java (from rev 4478, branches/SIDS_REFACTOR_FROM_TRUNK_BRANCH/bigdata/src/test/com/bigdata/util/TestBits.java) =================================================================== --- trunk/bigdata/src/test/com/bigdata/util/TestBits.java (rev 0) +++ trunk/bigdata/src/test/com/bigdata/util/TestBits.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -0,0 +1,166 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Aug 6, 2009 + */ + +package com.bigdata.util; + +import junit.framework.TestCase2; + +/** + * Test suite for {@link Bits}. + */ +public class TestBits extends TestCase2 { + + /** + * + */ + public TestBits() { + } + + /** + * @param name + */ + public TestBits(String name) { + super(name); + } + + public void test_ctor1() { + +// final byte[] d = new byte[1]; +// final ByteBuffer b = ByteBuffer.wrap(d); +// final BitVector v = new ByteBufferBitVector(b); +// +// assertEquals("length", 8L, v.length()); + + byte v = 0; + + // verify range check. + try { + Bits.get(v, -1); + fail("Expecting: " + IndexOutOfBoundsException.class); + } catch (IndexOutOfBoundsException ex) { + if (log.isInfoEnabled()) + log.info("Ignoring expected exception: " + ex); + } + + // verify range check. + try { + Bits.get(v, 8); + fail("Expecting: " + IndexOutOfBoundsException.class); + } catch (IndexOutOfBoundsException ex) { + if (log.isInfoEnabled()) + log.info("Ignoring expected exception: " + ex); + } + + for (int i = 0; i < 8; i++) + assertEquals(false, Bits.get(v, i)); + + // set bit zero. +// d[0] |= (1 << 0); + v = Bits.set(v, 0, true); + + if (log.isInfoEnabled()) + log.info(Bits.toString(v)); + assertEquals(true, Bits.get(v, 0)); + + // clear bit zero. 
+// d[0] &= ~(1 << 0); + v = Bits.set(v, 0, false); + + if (log.isInfoEnabled()) + log.info(Bits.toString(v)); + assertEquals(false, Bits.get(v, 0)); + + } + + /** + * Verify set/clear of each bit in the first byte. + */ + public void test_getBoolean() { + +// final byte[] d = new byte[1]; +// final ByteBuffer b = ByteBuffer.wrap(d); +// final BitVector v = new ByteBufferBitVector(b); + + byte v = 0; + + // verify all bits are zero. + for (int i = 0; i < 8; i++) + assertEquals(false, Bits.get(v, i)); + + // set/clear each bit in the first byte in turn. + for (int i = 0; i < 8; i++) { + + // set bit +// d[0] |= (1 << i); + v = Bits.set(v, i, true); + + if (log.isInfoEnabled()) + log.info(Bits.toString(v) + " : i=" + i + ", (1<<" + i + ")=" + + (1 << i)); + assertEquals(true, Bits.get(v, i)); + + // clear bit +// d[0] &= ~(1 << i); + v = Bits.set(v, i, false); + + if (log.isInfoEnabled()) + log.info(Bits.toString(v)); + assertEquals(false, Bits.get(v, i)); + + } + + } + + /** + * Verify set/clear of each bit in the first byte. + */ + public void test_getMask() { + + byte v = 0; + + // verify all bits are zero. + for (int i = 0; i < 8; i++) + assertEquals(false, Bits.get(v, i)); + + // set each bit in the byte + for (int i = 0; i < 8; i++) { + + // set bit + v = Bits.set(v, i, true); + assertEquals(true, Bits.get(v, i)); + + } + + // mask off all but the 0 and 1 bits + v = Bits.mask(v, 0, 1); + if (log.isInfoEnabled()) + log.info(Bits.toString(v)); + assertEquals(3, v); + + } + +} Property changes on: trunk/bigdata-jini/src/java/com/bigdata/attr ___________________________________________________________________ Modified: svn:mergeinfo - + /branches/SIDS_REFACTOR_FROM_TRUNK_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4393-4478 Property changes on: trunk/bigdata-jini/src/java/com/bigdata/disco ___________________________________________________________________ Modified: svn:mergeinfo - + /branches/SIDS_REFACTOR_FROM_TRUNK_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4393-4478 Property changes on: trunk/bigdata-jini/src/java/com/bigdata/disco/config ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-jini/src/java/com/bigdata/util/config ___________________________________________________________________ Modified: svn:mergeinfo - + /branches/SIDS_REFACTOR_FROM_TRUNK_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:4393-4478 Property changes on: trunk/bigdata-perf ___________________________________________________________________ Modified: svn:mergeinfo - + /branches/SIDS_REFACTOR_FROM_TRUNK_BRANCH/bigdata-perf:4393-4478 Property changes on: trunk/bigdata-perf/btc ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/btc/src ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/btc/src/resources ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/btc/src/resources/logging ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/lubm/lib ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/lubm/src/resources ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: 
trunk/bigdata-perf/lubm/src/resources/answers (U1) ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/lubm/src/resources/config ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/lubm/src/resources/logging ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/lubm/src/resources/scripts ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/uniprot ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/uniprot/src ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/uniprot/src/resources ___________________________________________________________________ Deleted: svn:mergeinfo - Property changes on: trunk/bigdata-perf/uniprot/src/resources/logging ___________________________________________________________________ Deleted: svn:mergeinfo - Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/axioms/BaseAxioms.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/axioms/BaseAxioms.java 2011-05-10 19:22:45 UTC (rev 4478) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/axioms/BaseAxioms.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -285,7 +285,7 @@ metadata.setBranchingFactor(branchingFactor); - tupleSer = new SPOTupleSerializer(SPOKeyOrder.SPO); + tupleSer = new SPOTupleSerializer(SPOKeyOrder.SPO, false/* sids */); metadata.setTupleSerializer(tupleSer); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 2011-05-10 19:22:45 UTC (rev 4478) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -1,14 +1,13 @@ package com.bigdata.rdf.changesets; import java.util.Iterator; -import java.util.Map; + import org.apache.log4j.Logger; + import com.bigdata.rdf.changesets.IChangeRecord.ChangeAction; -import com.bigdata.rdf.internal.IV; -import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.ModifiedEnum; import com.bigdata.rdf.spo.SPO; -import com.bigdata.rdf.spo.ISPO.ModifiedEnum; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.relation.accesspath.IElementFilter; import com.bigdata.striterator.ChunkedArrayIterator; Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/Justification.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/Justification.java 2011-05-10 19:22:45 UTC (rev 4478) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/Justification.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -921,7 +921,7 @@ // assert arity == 3; // Note: keys are SPOs; no values stored for the tuples. 
- tupleSer = new SPOTupleSerializer(SPOKeyOrder.SPO, + tupleSer = new SPOTupleSerializer(SPOKeyOrder.SPO, false/* sids */, DefaultTupleSerializer.getDefaultLeafKeysCoder(), EmptyRabaValueCoder.INSTANCE); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/ILexiconConfiguration.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/ILexiconConfiguration.java 2011-05-10 19:22:45 UTC (rev 4478) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/ILexiconConfiguration.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -72,91 +72,4 @@ */ void initExtensions(final IDatatypeURIResolver resolver); - /** - * <code>true</code> iff the <code>vte</code> and <code>dte</code> - * should be inlined. - * - * @param vte - * the term type - * @param dte - * the data type - public boolean isInline(VTE vte, DTE dte); - */ - -// /** -// * <code>true</code> iff <code>xsd:boolean</code> should be inlined. -// */ -// public boolean isBooleanInline(); -// -// /** -// * <code>true</code> iff the fixed size numerics (<code>xsd:int</code>, -// * <code>xsd:short</code>, <code>xsd:float</code>, etc) should be inlined. -// */ -// public boolean isSmallNumericInline(); -// -// /** -// * <code>true</code> iff xsd:integer should be inlined. -// * <p> -// * Note: The maximum length for the encoding is ~32kb per key. With a B+Tree -// * branching factor of 256 that is ~ 8MB per leaf before compression. While -// * that is definitely large, it is not so outrageous that we need to forbid -// * it. -// */ -// public boolean isXSDIntegerInline(); -// -// /** -// * <code>true</code> iff <code>xsd:decimal</code> should be inlined. -// */ -// public boolean isXSDDecimalInline(); -// -// /** -// * <code>true</code> iff blank node identifiers should be inlined. This -// * is only possible when the blank node identifiers are internally -// * generated {@link UUID}s since otherwise they can be arbitrary Unicode -// * strings which, like text-based Literals, can not be inlined. -// * <p> -// * This option is NOT compatible with -// * {@link AbstractTripleStore.Options#STORE_BLANK_NODES}. -// */ -// public boolean isBlankNodeInline(); -// -// /** -// * <code>true</code> if UUID values (other than blank nodes) should be -// * inlined. -// */ -// public boolean isUUIDInline(); -// -// /** -// * Option to enable storing of long literals (over a configured -// * threshold) as blob references. The TERM2ID index would have a -// * hash function (MD5, SHA-1, SHA-2, etc) of the value and assign -// * a termId. The ID2TERM index would map the termId to a blob -// * reference. The blob data would be stored in the journal and -// * migrate into index segments during overflow processing for -// * scale-out. -// */ -// public boolean isLongLiteralAsBlob(); -// -// /** -// * Return the {@link MessageDigest} used to compute a hash code for a long -// * literal. The message digest should compute a hash function with a very -// * small probability of collisions. In general, <code>SHA-256</code> (32 -// * bytes), <code>SHA-384</code> (48 bytes) and <code>SHA-512</code> (64 -// * byte) should be reasonable choices. -// * <p> -// * Appropriate hash algorithms are defined in the <a -// * href="http://csrc.nist.gov/publications/fips/index.html">FIPS PUB -// * 180-2</a> (which has been replaced by <a href= -// * "http://csrc.nist.gov/publications/fips/fips180-3/fips180-3_final.pdf" -// * >FIPS PUB 180-3</a>. 
Also see Recommendation for Applications Using -// * Approved Hash Algorithms in <a href= -// * "http://csrc.nist.gov/publications/nistpubs/800-107/NIST-SP-800-107.pdf" -// * >SP 800-107</a>, which provides information about the collision -// * resistance of these hash algorithms. -// * -// * @return A {@link MessageDigest} object which can be used to compute the -// * hash code for a long literal. -// */ -// public MessageDigest getLongLiteralMessageDigest(); -// } Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2011-05-10 19:22:45 UTC (rev 4478) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -31,6 +31,9 @@ import java.math.BigInteger; import java.util.ArrayList; import java.util.UUID; + +import org.apache.log4j.Logger; + import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.keys.KeyBuilder; import com.bigdata.rawstore.Bytes; @@ -38,6 +41,9 @@ import com.bigdata.rdf.internal.constraints.InlineGT; import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.model.BigdataLiteral; +import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPOKeyOrder; /** @@ -45,6 +51,8 @@ */ public class IVUtility { + private static final transient Logger log = Logger.getLogger(IVUtility.class); + public static boolean equals(IV iv1, IV iv2) { // same IV or both null @@ -231,23 +239,42 @@ */ public static IV[] decode(final byte[] key, final int numTerms) { + return decode(key, 0 /* offset */, numTerms); + + } + + /** + * Decodes up to numTerms {@link IV}s from a byte[]. + * + * @param key + * The byte[]. + * @param offset + * The offset into the byte[] key. + * @param numTerms + * The number of terms to decode. + * + * @return The set of {@link IV}s. + */ + public static IV[] decode(final byte[] key, final int offset, + final int numTerms) { + if (numTerms <= 0) return new IV[0]; final IV[] ivs = new IV[numTerms]; - int offset = 0; + int o = offset; for (int i = 0; i < numTerms; i++) { - if (offset >= key.length) + if (o >= key.length) throw new IllegalArgumentException( "key is not long enough to decode " + numTerms + " terms."); - ivs[i] = decodeFromOffset(key, offset); + ivs[i] = decodeFromOffset(key, o); - offset += ivs[i] == null + o += ivs[i] == null ? 
NullIV.INSTANCE.byteLength() : ivs[i].byteLength(); } @@ -311,6 +338,22 @@ */ // The value type (URI, Literal, BNode, SID) final VTE vte = AbstractIV.getInternalValueTypeEnum(flags); + + // handle inline sids + if (vte == VTE.STATEMENT) { + + // spo is directly decodable from key + final ISPO spo = SPOKeyOrder.SPO.decodeKey(key, o); + + // all spos that have a sid are explicit + spo.setStatementType(StatementEnum.Explicit); + spo.setStatementIdentifier(true); + + // create a sid iv and return it + final SidIV sid = new SidIV(spo); + return sid; + + } // The data type final DTE dte = AbstractIV.getInternalDataTypeEnum(flags); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java 2011-05-10 19:22:45 UTC (rev 4478) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -208,7 +208,7 @@ final BNode b = (BNode) value; final String id = b.getID(); - + final char c = id.charAt(0); if (c == 'u') { @@ -270,12 +270,11 @@ } - /** - * See {@link ILexiconConfiguration#isInline(VTE, DTE)}. - */ - public boolean isInline(final VTE vte, final DTE dte) { + private boolean isInline(final VTE vte, final DTE dte) { switch (vte) { + case STATEMENT: + return true; case BNODE: return inlineBNodes && isSupported(dte); case LITERAL: Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/SidIV.java (from rev 4478, branches/SIDS_REFACTOR_FROM_TRUNK_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/SidIV.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/SidIV.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/SidIV.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -0,0 +1,212 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.internal; + +import java.math.BigInteger; + +import org.apache.log4j.Logger; + +import com.bigdata.btree.keys.IKeyBuilder; +import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.rdf.model.BigdataBNode; +import com.bigdata.rdf.model.BigdataValueFactory; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPO; +import com.bigdata.rdf.spo.SPOComparator; +import com.bigdata.rdf.spo.SPOKeyOrder; + +/** + * Internal value representing an inline statement identifier. Uses the + * {@link ISPO} supplied in the ctor as the inline value. The + * {@link #asValue(BigdataValueFactory, ILexiconConfiguration)} method returns a + * {@link BigdataBNode} that is used to represent the sid in serialization + * formats (such as the custom RDF/XML extension for sids). 
The bnode is + * guaranteed to always have the same bnode id for a given inlined SPO. This is + * accomplished using the byte[] key encoding for the spo along with the + * BigInteger class. + * <p> + * This internal value has a {@link VTE} of {@link VTE#STATEMENT}. It is encoded + * into the statement indices by directly encoding the spo using + * {@link SPOKeyOrder#encodeKey(IKeyBuilder, ISPO)} via the + * {@link SPOKeyOrder#SPO} key order. Thus when decoded from the statement + * indices, the spo associated with this sid is materialized directly from the + * sid itself. See {@link IVUtility#decode(byte[])}. The spo decoded from the + * sid IV will be marked as explicit (only explicit statements have sids) and + * this SidIV will be attached to it. This completely eliminates the need for a + * reverse index from sid->spo, as the spo is encoded inline into the SidIV + * itself. This replaces the TermId model for representing sids. + * <p> + * {@inheritDoc} + */ +public class SidIV<V extends BigdataBNode> extends + AbstractInlineIV<V, ISPO> { + + /** + * + */ + private static final long serialVersionUID = 685148537376856907L; + + protected static final Logger log = Logger.getLogger(SidIV.class); + + /** + * The inline spo. + */ + private final ISPO spo; + + /** + * The cached byte[] key for the encoding of this IV. + */ + private transient byte[] key; + + /** + * The cached materialized BigdataValue for this sid. + */ + private transient V bnode; + + /** + * Ctor with internal value spo specified. + */ + public SidIV(final ISPO spo) { + + /* + * Note: XSDBoolean happens to be assigned the code value of 0, which is + * the value we we want when the data type enumeration will be ignored. + */ + super(VTE.STATEMENT, DTE.XSDBoolean); + + this.spo = spo; + + } + + /** + * Returns the inline spo. + */ + public ISPO getInlineValue() throws UnsupportedOperationException { + return spo; + } + + /** + * Returns the bnode representation of this IV, useful for serialization + * formats such as RDF/XML. See {@link #bnodeId()}. + */ + public V asValue(final BigdataValueFactory f, + final ILexiconConfiguration config) { + if (bnode == null) { + bnode = (V) f.createBNode(bnodeId()); + bnode.setIV(this); + bnode.setStatementIdentifier(true); + } + return bnode; + } + + /** + * Return the byte length for the byte[] encoded representation of this + * internal value. Depends on the byte length of the encoded inline spo. + */ + public int byteLength() { + return 1 + key().length; + } + + public String toString() { + return stringValue(); + } + + public String stringValue() { + return "Sid("+toString(spo)+")"; + } + + /** + * Pretty print the inline spo. Calling SPO.toString() results in an + * infinite loop. + */ + private static String toString(final ISPO spo) { + return (SPO.toString(spo.s()) + ":" + + SPO.toString(spo.p()) + ":" + + SPO.toString(spo.o())); + } + + public int hashCode() { + return spo.hashCode(); + } + + /** + * Using the BigInteger class to create a unique bnode id based on the + * byte[] key of the inline spo. + */ + private String bnodeId() { +// // just use the hash code. can result in collisions +// return String.valueOf(hashCode()); + + // create a big integer using the spo key. should result in unique ids + final byte[] key = key(); + final int signum = key.length > 0 ? 
1 : 0; + final BigInteger bi = new BigInteger(signum, key); + return bi.toString(); + } + + public boolean equals(Object o) { + if (this == o) + return true; + if (o instanceof SidIV) { + final ISPO spo2 = ((SidIV) o).spo; + return spo.equals(spo2); + } + return false; + } + + protected int _compareTo(IV o) { + final ISPO spo2 = ((SidIV) o).spo; + return SPOComparator.INSTANCE.compare(spo, spo2); + } + + /** + * Encode this internal value into the supplied key builder. Emits the + * flags, following by the encoded byte[] representing the spo, in SPO + * key order. + * <p> + * {@inheritDoc} + */ + public IKeyBuilder encode(final IKeyBuilder keyBuilder) { + + // First emit the flags byte. + keyBuilder.append(flags()); + + // Then append the SPO's key in SPOKeyOrder.SPO + keyBuilder.append(key()); + + return keyBuilder; + + } + + private byte[] key() { + if (key == null) { + /* + * Build the SPO's key in SPOKeyOrder.SPO. + */ + key = SPOKeyOrder.SPO.encodeKey(new KeyBuilder(), spo); + } + return key; + } + +} \ No newline at end of file Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2011-05-10 19:22:45 UTC (rev 4478) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -1350,197 +1350,197 @@ } - /** - * Assign unique statement identifiers to triples. - * <p> - * Each distinct {@link StatementEnum#Explicit} {s,p,o} is assigned a unique - * statement identifier using the {@link LexiconKeyOrder#TERM2ID} index. The - * assignment of statement identifiers is <i>consistent</i> using an - * unisolated atomic write operation similar to - * {@link #addTerms(BigdataValue[], int, boolean)} - * <p> - * Note: Statement identifiers are NOT inserted into the reverse (id:term) - * index. Instead, they are written into the values associated with the - * {s,p,o} in each of the statement indices. That is handled by - * {@link AbstractTripleStore#addStatements(AbstractTripleStore, boolean, IChunkedOrderedIterator, IElementFilter)} - * , which is also responsible for invoking this method in order to have the - * statement identifiers on hand before it writes on the statement indices. - * <p> - * Note: The caller's {@link ISPO}[] is sorted into SPO order as a - * side-effect. - * <p> - * Note: The statement identifiers are assigned to the {@link ISPO}s as a - * side-effect. - * <p> - * Note: SIDs are NOT supported for quads, so this code is never executed - * for quads. - */ - public void addStatementIdentifiers(final ISPO[] a, final int n) { +// /** +// * Assign unique statement identifiers to triples. +// * <p> +// * Each distinct {@link StatementEnum#Explicit} {s,p,o} is assigned a unique +// * statement identifier using the {@link LexiconKeyOrder#TERM2ID} index. The +// * assignment of statement identifiers is <i>consistent</i> using an +// * unisolated atomic write operation similar to +// * {@link #addTerms(BigdataValue[], int, boolean)} +// * <p> +// * Note: Statement identifiers are NOT inserted into the reverse (id:term) +// * index. Instead, they are written into the values associated with the +// * {s,p,o} in each of the statement indices. 
That is handled by +// * {@link AbstractTripleStore#addStatements(AbstractTripleStore, boolean, IChunkedOrderedIterator, IElementFilter)} +// * , which is also responsible for invoking this method in order to have the +// * statement identifiers on hand before it writes on the statement indices. +// * <p> +// * Note: The caller's {@link ISPO}[] is sorted into SPO order as a +// * side-effect. +// * <p> +// * Note: The statement identifiers are assigned to the {@link ISPO}s as a +// * side-effect. +// * <p> +// * Note: SIDs are NOT supported for quads, so this code is never executed +// * for quads. +// */ +// public void addStatementIdentifiers(final ISPO[] a, final int n) { +// +// // * @throws UnsupportedOperationException +//// * if {@link Options#STATEMENT_IDENTIFIERS} was not specified. +//// * +//// if (!statementIdentifiers) +//// throw new UnsupportedOperationException(); +// +// if (n == 0) +// return; +// +// final long begin = System.currentTimeMillis(); +// final long keyGenTime; // time to convert {s,p,o} to byte[] sort keys. +// final long sortTime; // time to sort terms by assigned byte[] keys. +// final long insertTime; // time to insert terms into the term:id index. +// +// /* +// * Sort the caller's array into SPO order. This order will correspond to +// * the total order of the term:id index. +// * +// * Note: This depends critically on SPOComparator producing the same +// * total order as we would obtain by an unsigned byte[] sort of the +// * generated sort keys. +// * +// * Note: the keys for the term:id index are NOT precisely the keys used +// * by the SPO index since there is a prefix code used to mark the keys +// * are Statements (vs Literals, BNodes, or URIs). +// */ +// { +// +// final long _begin = System.currentTimeMillis(); +// +// Arrays.sort(a, 0, n, SPOComparator.INSTANCE); +// +// sortTime = System.currentTimeMillis() - _begin; +// +// } +// +// /* +// * Insert into the forward index (term -> id). This will either assign a +// * statement identifier or return the existing statement identifier if +// * the statement is already in the lexicon (the statement identifier is +// * in a sense a term identifier since it is assigned by the term:id +// * index). +// * +// * Note: Since we only assign statement identifiers for explicit +// * statements the caller's SPO[] can not be directly correlated to the +// * keys[]. We copy the references into b[] so that we can keep that +// * correlation 1:1. +// */ +// final byte[][] keys = new byte[n][]; +// final ISPO[] b = new ISPO[n]; +// +// /* +// * Generate the sort keys for the term:id index. +// */ +// int nexplicit = 0; +// { +// +// final long _begin = System.currentTimeMillis(); +// +// // local instance, no unicode support. +// final IKeyBuilder keyBuilder = KeyBuilder +// .newInstance(1/* statement byte */+ (3/* triple */* Bytes.SIZEOF_LONG)); +// +// for (int i = 0; i < n; i++) { +// +// final ISPO spo = a[i]; +// +// if (!spo.isExplicit()) +// continue; +// +// if (!spo.isFullyBound()) +// throw new IllegalArgumentException("Not fully bound: " +// + spo.toString(/*this*/)); +// +// /* +// * Creating a dummy term for the Term2Id index. +// */ +// keyBuilder.reset().append(ITermIndexCodes.TERM_CODE_STMT); +// spo.s().encode(keyBuilder); +// spo.p().encode(keyBuilder); +// spo.o().encode(keyBuilder); +// keys[nexplicit] = keyBuilder.getKey(); +// +// // Note: keeps correlation between key and SPO. 
+// b[nexplicit] = spo; +// +// nexplicit++; +// +// } +// +// keyGenTime = System.currentTimeMillis() - _begin; +// +// } +// +// /* +// * Execute a remote unisolated batch operation that assigns the +// * statement identifier. +// */ +// { +// +// final long _begin = System.currentTimeMillis(); +// +// final IIndex termIdIndex = getTerm2IdIndex(); +// +// // run the procedure. +// if (nexplicit > 0) { +// +// termIdIndex.submit(0/* fromIndex */, nexplicit/* toIndex */, +// keys, null/* vals */, new Term2IdWriteProcConstructor( +// false/* readOnly */, storeBlankNodes, //scaleOutTermIds, +// termIdBitsToReverse), +// new IResultHandler<Term2IdWriteProc.Result, Void>() { +// +// /** +// * Copy the assigned / discovered statement +// * identifiers onto the corresponding elements of +// * the SPO[]. +// */ +// public void aggregate(Term2IdWriteProc.Result result, +// Split split) { +// +// for (int i = split.fromIndex, j = 0; i < split.toIndex; i++, j++) { +// +//// if (b[i].c() != 0L +//// && b[i].c() != result.ids[j]) { +//// System.err.println("spo=" +//// + getContainer().toString(b[i]) +//// + ", sid=" +//// + getContainer().toString( +//// result.ids[j])); +//// } +// +// b[i].setStatementIdentifier(result.ivs[j]); +// +// } +// +// } +// +// public Void getResult() { +// +// return null; +// +// } +// +// }); +// +// } +// +// insertTime = System.currentTimeMillis() - _begin; +// +// } +// +// final long elapsed = System.currentTimeMillis() - begin; +// +// if (log.isInfoEnabled() && n > 1000 || elapsed > 3000) { +// +// log.info("Wrote " + n + " in " + elapsed + "ms; keygen=" +// + keyGenTime + "ms, sort=" + sortTime + "ms, insert=" +// + insertTime + "ms"); +// +// } +// +// } - // * @throws UnsupportedOperationException -// * if {@link Options#STATEMENT_IDENTIFIERS} was not specified. -// * -// if (!statementIdentifiers) -// throw new UnsupportedOperationException(); - - if (n == 0) - return; - - final long begin = System.currentTimeMillis(); - final long keyGenTime; // time to convert {s,p,o} to byte[] sort keys. - final long sortTime; // time to sort terms by assigned byte[] keys. - final long insertTime; // time to insert terms into the term:id index. - - /* - * Sort the caller's array into SPO order. This order will correspond to - * the total order of the term:id index. - * - * Note: This depends critically on SPOComparator producing the same - * total order as we would obtain by an unsigned byte[] sort of the - * generated sort keys. - * - * Note: the keys for the term:id index are NOT precisely the keys used - * by the SPO index since there is a prefix code used to mark the keys - * are Statements (vs Literals, BNodes, or URIs). - */ - { - - final long _begin = System.currentTimeMillis(); - - Arrays.sort(a, 0, n, SPOComparator.INSTANCE); - - sortTime = System.currentTimeMillis() - _begin; - - } - - /* - * Insert into the forward index (term -> id). This will either assign a - * statement identifier or return the existing statement identifier if - * the statement is already in the lexicon (the statement identifier is - * in a sense a term identifier since it is assigned by the term:id - * index). - * - * Note: Since we only assign statement identifiers for explicit - * statements the caller's SPO[] can not be directly correlated to the - * keys[]. We copy the references into b[] so that we can keep that - * correlation 1:1. - */ - final byte[][] keys = new byte[n][]; - final ISPO[] b = new ISPO[n]; - - /* - * Generate the sort keys for the term:id index. 
- */ - int nexplicit = 0; - { - - final long _begin = System.currentTimeMillis(); - - // local instance, no unicode support. - final IKeyBuilder keyBuilder = KeyBuilder - .newInstance(1/* statement byte */+ (3/* triple */* Bytes.SIZEOF_LONG)); - - for (int i = 0; i < n; i++) { - - final ISPO spo = a[i]; - - if (!spo.isExplicit()) - continue; - - if (!spo.isFullyBound()) - throw new IllegalArgumentException("Not fully bound: " - + spo.toString(/*this*/)); - - /* - * Creating a dummy term for the Term2Id index. - */ - keyBuilder.reset().append(ITermIndexCodes.TERM_CODE_STMT); - spo.s().encode(keyBuilder); - spo.p().encode(keyBuilder); - spo.o().encode(keyBuilder); - keys[nexplicit] = keyBuilder.getKey(); - - // Note: keeps correlation between key and SPO. - b[nexplicit] = spo; - - nexplicit++; - - } - - keyGenTime = System.currentTimeMillis() - _begin; - - } - - /* - * Execute a remote unisolated batch operation that assigns the - * statement identifier. - */ - { - - final long _begin = System.currentTimeMillis(); - - final IIndex termIdIndex = getTerm2IdIndex(); - - // run the procedure. - if (nexplicit > 0) { - - termIdIndex.submit(0/* fromIndex */, nexplicit/* toIndex */, - keys, null/* vals */, new Term2IdWriteProcConstructor( - false/* readOnly */, storeBlankNodes, //scaleOutTermIds, - termIdBitsToReverse), - new IResultHandler<Term2IdWriteProc.Result, Void>() { - - /** - * Copy the assigned / discovered statement - * identifiers onto the corresponding elements of - * the SPO[]. - */ - public void aggregate(Term2IdWriteProc.Result result, - Split split) { - - for (int i = split.fromIndex, j = 0; i < split.toIndex; i++, j++) { - -// if (b[i].c() != 0L -// && b[i].c() != result.ids[j]) { -// System.err.println("spo=" -// + getContainer().toString(b[i]) -// + ", sid=" -// + getContainer().toString( -// result.ids[j])); -// } - - b[i].setStatementIdentifier(result.ivs[j]); - - } - - } - - public Void getResult() { - - return null; - - } - - }); - - } - - insertTime = System.currentTimeMillis() - _begin; - - } - - final long elapsed = System.currentTimeMillis() - begin; - - if (log.isInfoEnabled() && n > 1000 || elapsed > 3000) { - - log.info("Wrote " + n + " in " + elapsed + "ms; keygen=" - + keyGenTime + "ms, sort=" + sortTime + "ms, insert=" - + insertTime + "ms"); - - } - - } - /** * <p> * Add the terms to the full text index so that we can do fast lookup of the @@ -2168,26 +2168,28 @@ if (tid.isStatement()) { - /* - * Statement identifiers are not stored in the reverse lexicon (or - * the cache). - * - * A statement identifier is externalized as a BNode. The "S" prefix - * is a syntactic marker for those in the know to indicate that the - * BNode corresponds to a statement identifier. - */ +// /* +// * Statement identifiers are not stored in the reverse lexicon (or +// * the cache). +// * +// * A statement identifier is externalized as a BNode. The "S" prefix +// * is a syntactic marker for those in the know to indicate that the +// * BNode corresponds to a statement identifier. +// */ +// +// final BigdataBNode stmt = valueFactory.createBNode("S" +// + Long.toString(tid.getTermId())); +// +// // set the term identifier on the object. +// stmt.setIV(tid); +// +// // mark as a statement identifier. +// stmt.setStatementIdentifier(true); +// +// return stmt; + + throw new IllegalArgumentException("sids should be inline"); - final BigdataBNode stmt = valueFactory.createBNode("S" - + Long.toString(tid.getTermId())); - - // set the term identifier on the object. 
- stmt.setIV(tid); - - // mark as a statement identifier. - stmt.setStatementIdentifier(true); - - return stmt; - } if (!storeBlankNodes && tid.isBNode()) { Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicIndexWriteProc.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicIndexWriteProc.java 2011-05-10 19:22:45 UTC (rev 4478) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicIndexWriteProc.java 2011-05-10 19:39:39 UTC (rev 4479) @@ -35,34 +35,8 @@ import com.bigdata.btree.proc.AbstractKeyArrayIndexProcedureConstructor; import com.bigdata.btree.proc.IParallelizableIndexProcedure; import com.bigdata.btree.raba.codec.IRabaCoder; -import com.bigdata.io.ByteArrayBuffer; -import com.bigdata.rdf.model.StatementEnum; import com.bigdata.relation.IMutableRelationIndexWriteProcedure; -/** - * Procedure for batch index on a single statement index (or index partition). - * <p> - * The key for each statement encodes the {s:p:o} of the statement in the order - * that is appropriate for the index (SPO, POS, OSP, etc). The key is written - * unchanged on the index. - * <p> - * The value for each statement is a byte that encodes the {@link StatementEnum} - * and also encodes whether or not the "override" flag is set using - see - * {@link StatementEnum#MASK_OVERRIDE} - followed by 8 bytes representing the - * statement identifier IFF statement identifiers are enabled AND the - * {@link StatementEnum} is {@link StatementEnum#Explicit}. The value requires - * interpretation to determine the byte[] that will be written as the value on - * the index - see the code for... [truncated message content] |
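
For orientation, the new com.bigdata.util.Bits helper copied into the trunk in r4479 is small enough to illustrate in isolation. The sketch below is not code from the commit; it only exercises the set/get/mask/toString signatures shown in the Bits.java diff above, and the flag names are hypothetical stand-ins for the kind of boolean properties the class javadoc mentions (see SPO).

import com.bigdata.util.Bits;

public class BitsExample {

    // Hypothetical flag positions packed into a single byte.
    private static final int EXPLICIT = 0;
    private static final int INFERRED = 1;
    private static final int OVERRIDE = 2;

    public static void main(final String[] args) {

        byte flags = 0;

        // Bits.set() returns the new byte value; the argument is not mutated.
        flags = Bits.set(flags, EXPLICIT, true);
        flags = Bits.set(flags, OVERRIDE, true);

        // Read individual flags back.
        System.out.println("explicit=" + Bits.get(flags, EXPLICIT)); // true
        System.out.println("inferred=" + Bits.get(flags, INFERRED)); // false

        // Keep only the EXPLICIT and INFERRED bits; everything else is cleared.
        final byte masked = Bits.mask(flags, EXPLICIT, INFERRED);

        // Unsigned binary rendering, useful for debugging.
        System.out.println(Bits.toString(masked));
    }
}

Since set() and mask() return the new byte rather than mutating state, the helper stays usable on primitive byte fields without any wrapper object.
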
From: <tho...@us...> - 2011-05-16 12:53:04
|
Revision: 4504
          http://bigdata.svn.sourceforge.net/bigdata/?rev=4504&view=rev
Author:   thompsonbry
Date:     2011-05-16 12:52:57 +0000 (Mon, 16 May 2011)

Log Message:
-----------
Added the 0.84.0 release notes and bumped the version number in preparation for a release.

Modified Paths:
--------------
    trunk/build.properties

Added Paths:
-----------
    trunk/bigdata/src/releases/RELEASE_0_84_0.txt

Added: trunk/bigdata/src/releases/RELEASE_0_84_0.txt
===================================================================
--- trunk/bigdata/src/releases/RELEASE_0_84_0.txt	                        (rev 0)
+++ trunk/bigdata/src/releases/RELEASE_0_84_0.txt	2011-05-16 12:52:57 UTC (rev 4504)
@@ -0,0 +1,68 @@
+This is a bigdata (R) release. This release is capable of loading 1B triples in
+under one hour on a 15 node cluster. JDK 1.6 is required.
+
+See [1,2] for instructions on installing bigdata(R), [4] for the javadoc, and
+[3,5,6] for news, questions, and the latest developments. For more information
+about SYSTAP, LLC and bigdata, see [7].
+
+Please note that we recommend checking out the code from SVN using the tag for
+this release. The code will build automatically under eclipse. You can also
+build the code using the ant script. The cluster installer requires the use of
+the ant script. You can check out this release from the following URL:
+
+https://bigdata.svn.sourceforge.net/svnroot/bigdata/branches/BIGDATA_RELEASE_0_84_0
+
+New features:
+
+- Inlining provenance metadata into the statement indices and fast reverse
+  lookup of provenance metadata using statement identifiers (SIDs).
+
+Significant bug fixes:
+
+- The journal size could double in some cases following a restart due to a typo
+  in the WORMStrategy constructor.
+
+  See https://sourceforge.net/apps/trac/bigdata/ticket/236
+
+- Fixed a concurrency hole in the commit protocol for the Journal which could
+  result in a concurrent modification to the B+Tree during the commit protocol.
+
+- Fixed a problem in the abort protocol for the BigdataSail:
+
+- Fixed a problem where the BigdataSail would permit the same thread to obtain
+  more than one UNISOLATED connection:
+
+  See https://sourceforge.net/apps/trac/bigdata/ticket/278
+  See https://sourceforge.net/apps/trac/bigdata/ticket/284
+  See https://sourceforge.net/apps/trac/bigdata/ticket/288
+  See https://sourceforge.net/apps/trac/bigdata/ticket/289
+
+The road map [3] for the next releases includes:
+
+- Single machine data storage to 10B+ triples;
+- Simple embedded and/or webapp deployment;
+- 100% native SPARQL evaluation with lots of query optimizations;
+- High-volume analytic query and SPARQL 1.1 query, including aggregations;
+- Simplified deployment, configuration, and administration for clusters;
+- High availability for the journal and the cluster.
+
+For more information, please see the following links:
+
+[1] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page
+[2] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted
+[3] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap
+[4] http://www.bigdata.com/bigdata/docs/api/
+[5] http://sourceforge.net/projects/bigdata/
+[6] http://www.bigdata.com/blog
+[7] http://www.systap.com/bigdata.htm
+
+About bigdata:
+
+Bigdata® is a horizontally-scaled, general purpose storage and computing fabric
+for ordered data (B+Trees), designed to operate on either a single server or a
+cluster of commodity hardware. Bigdata® uses dynamically partitioned key-range
+shards in order to remove any realistic scaling limits - in principle, bigdata®
+may be deployed on 10s, 100s, or even thousands of machines and new capacity may
+be added incrementally without requiring the full reload of all data. The bigdata®
+RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL),
+and datum level provenance.

Property changes on: trunk/bigdata/src/releases/RELEASE_0_84_0.txt
___________________________________________________________________
Added: svn:keywords
   + Id Date Revision Author HeadURL

Modified: trunk/build.properties
===================================================================
--- trunk/build.properties	2011-05-15 18:46:17 UTC (rev 4503)
+++ trunk/build.properties	2011-05-16 12:52:57 UTC (rev 4504)
@@ -36,8 +36,8 @@
 release.dir=ant-release
 
 # The build version (note: 0.82b -> 0.82.0)
-build.ver=0.83.2
-build.ver.osgi=0.83
+build.ver=0.84.0
+build.ver.osgi=0.84
 
 # Set true to do a snapshot build. This changes the value of ${version} to
 # include the date.