From: <tho...@us...> - 2014-05-19 14:26:16
Revision: 8359 http://sourceforge.net/p/bigdata/code/8359 Author: thompsonbry Date: 2014-05-19 14:26:12 +0000 (Mon, 19 May 2014) Log Message: ----------- Bug fix for #940 (HA LBS breaks tomcat deployment). The root cause is that the ProxyServlet is not available under tomcat (or anything else besides jetty). Therefore it can not be configured from the same web.xml file that is used for other platforms. To address this, I extracted the HA LBS configuration into a new override-web.xml file and then modified the NanoSparqlServer to locate that resource. The HA test suite also needed to be modified to explictly locate this resource. See #940 (HA LBS breaks tomcat deployment). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-05-19 14:26:12 UTC (rev 8359) @@ -233,10 +233,10 @@ serviceDir = bigdata.serviceDir; + logicalServiceId = bigdata.logicalServiceId; + haLogDir = bigdata.logDir; - logicalServiceId = bigdata.logicalServiceId; - writePipelineAddr = new InetSocketAddress("localhost",bigdata.haPort); /* Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-05-19 14:26:12 UTC (rev 8359) @@ -61,7 +61,7 @@ private static fedname = "benchmark"; // The RMI port for the HAGlue interface (may be ZERO for a random port). - private static rmiPort = ConfigMath.add(9080,1); + private static rmiPort = ConfigMath.add(9080,2); // write replication pipeline port (listener). 
private static haPort = ConfigMath.add(9090,2); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-19 14:26:12 UTC (rev 8359) @@ -95,6 +95,7 @@ import com.bigdata.quorum.QuorumException; import com.bigdata.quorum.zk.ZKQuorumClient; import com.bigdata.quorum.zk.ZKQuorumImpl; +import com.bigdata.rdf.sail.webapp.NanoSparqlServer; import com.bigdata.rdf.sail.webapp.client.HttpException; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; import com.bigdata.service.jini.JiniClientConfig; @@ -135,6 +136,7 @@ */ static class ServiceListener implements IServiceListener { + @SuppressWarnings("unused") private volatile HAGlue haGlue; private volatile ProcessHelper processHelper; private volatile boolean dead = false; @@ -2226,7 +2228,7 @@ * Used to override the port at which jetty sets up the http * connection. */ - private final String TEST_JETTY_PORT = "jetty.port"; + private final String TEST_JETTY_PORT = NanoSparqlServer.SystemProperties.JETTY_PORT; /** * The path in the local file system to the root of the web @@ -2234,13 +2236,15 @@ * code, but the webapp gets deployed to the serviceDir for this * test suite. */ - private final String JETTY_RESOURCE_BASE = "jetty.resourceBase"; - + private final String JETTY_RESOURCE_BASE = NanoSparqlServer.SystemProperties.JETTY_RESOURCE_BASE; + + private final String JETTY_OVERRIDE_WEB_XML = NanoSparqlServer.SystemProperties.JETTY_OVERRIDE_WEB_XML; + /** * Used to override the <code>jetty.dump.start</code> environment * property. */ - private final String TEST_JETTY_DUMP_START = "jetty.dump.start"; + private final String TEST_JETTY_DUMP_START = NanoSparqlServer.SystemProperties.JETTY_DUMP_START; /** * The absolute effective path of the service directory. This is @@ -2290,6 +2294,9 @@ // Override the location of the webapp as deployed. cmds.add("-D" + JETTY_RESOURCE_BASE + "=."); + // Override the location of the override-web.xml file as deployed. + cmds.add("-D" + JETTY_OVERRIDE_WEB_XML + "=./WEB-INF/override-web.xml"); + // Override the jetty.dump.start. cmds.add("-D" + TEST_JETTY_DUMP_START + "=" + jettyDumpStart); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-19 14:26:12 UTC (rev 8359) @@ -191,6 +191,23 @@ */ String JETTY_RESOURCE_BASE = "jetty.resourceBase"; + /** + * The location of the <code>override-web.xml</code> resource. The + * default is given in <code>jetty.xml</code> and serves to locate the + * resource when deployed under an IDE. If not explicitly given, value + * of the environment variable is set by the same logic that sets the + * {@link #JETTY_RESOURCE_BASE} environment variable. 
This allows the + * <code>override-web.xml</code> resource to be found in its default + * location (which is the same directory / package as the + * <code>web.xml</code> file) while still preserving the ability to + * override the location of that resource explicitly by setting the + * environment variable before starting the server. + * + * @see <a href="http://trac.bigdata.com/ticket/940" > ProxyServlet in + * web.xml breaks tomcat WAR (HA LBS) </a> + */ + String JETTY_OVERRIDE_WEB_XML = "jetty.overrideWebXml"; + } /** @@ -439,10 +456,17 @@ public static void awaitServerStart(final Server server) throws InterruptedException, TimeoutException, Exception { +// Note: Does not appear to help. +// +// final WebAppContext wac = getWebApp(server); +// +// if (wac == null) +// throw new Exception("WebApp is not available?"); + final long timeout = Long.parseLong(System.getProperty( SystemProperties.JETTY_STARTUP_TIMEOUT, SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT)); - + boolean ok = false; final long begin = System.nanoTime(); final long nanos = TimeUnit.SECONDS.toNanos(timeout); @@ -453,7 +477,8 @@ server.start(); // Await running. remaining = nanos - (System.nanoTime() - begin); - while (server.isStarting() && !server.isRunning() && remaining > 0) { + while (server.isStarting() && !server.isRunning() + /* && !wac.isRunning() */ && remaining > 0) { Thread.sleep(100/* ms */); // remaining = nanos - (now - begin) [aka elapsed] remaining = nanos - (System.nanoTime() - begin); @@ -461,6 +486,8 @@ if (remaining < 0) { throw new TimeoutException(); } +// if (!wac.isRunning()) +// throw new Exception("WebApp is not running?"); ok = true; } finally { if (!ok) { @@ -870,9 +897,18 @@ * to jetty itself since it will interpret the jetty.xml file * itself. */ + final String tmp = resourceBaseURL.toExternalForm(); + System.setProperty(SystemProperties.JETTY_RESOURCE_BASE, - resourceBaseURL.toExternalForm()); + tmp); + final URL overrideWebXmlURL = new URL(tmp + + (tmp.endsWith("/") ? "" : "/") + + "WEB-INF/override-web.xml"); + + System.setProperty(SystemProperties.JETTY_OVERRIDE_WEB_XML, + overrideWebXmlURL.toExternalForm()); + } } @@ -885,7 +921,9 @@ + ", isClassPath=" + isClassPath + ", jetty.resourceBase(effective)=" - + System.getProperty(SystemProperties.JETTY_RESOURCE_BASE)); + + System.getProperty(SystemProperties.JETTY_RESOURCE_BASE) + + ", jetty.overrideWebXml(effective)=" + + System.getProperty(SystemProperties.JETTY_OVERRIDE_WEB_XML)); } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml 2014-05-19 14:26:12 UTC (rev 8359) @@ -0,0 +1,100 @@ +<?xml version="1.0" encoding="UTF-8"?> +<web-app xmlns="http://java.sun.com/xml/ns/javaee" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_1.xsd" + version="3.1"> + <servlet> + <servlet-name>Load Balancer</servlet-name> + <description> + The HA Load Balancer servlet provides a transparent proxy for + requests arriving its configured URL pattern (the "external" + interface for the load balancer) to the root of the web + application. + + The use of the load balancer is entirely optional. If the + security rules permit, then clients MAY make requests directly + against a specific service. 
Thus, no specific provision exists + to disable the load balancer servlet, but you may choose not to + deploy it. + + When successfully deployed, requests having prefix corresponding to + the URL pattern for the load balancer are automatically redirected + to a joined service in the met quorum based on the configured load + balancer policy. + + Requests directed to /bigdata/LBS/leader are proxied to the quorum + leader - this URL must be used for non-idempotent requests + (updates). + + Requests directed to /bigdata/LBS/read are load balanced over the + services joined with the met quourm. This URL may only be used + with idempotent requests (reads). + + For non-HA deployments, requests are simply forwarded to the local + service after stripping off the /LBS/leader or /LBS/read prefix. + Thus, it is always safe to use the LBS request URLs. + + The load balancer policies are "HA aware." They will always + redirect update requests to the quorum leader. The default + polices will load balance read requests over the leader and + followers in a manner that reflects the CPU, IO Wait, and GC + Time associated with each service. The PlatformStatsPlugIn + and GangliaPlugIn MUST be enabled for the default load + balancer policy to operate. It depends on those plugins to + maintain a model of the load on the HA replication cluster. + The GangliaPlugIn should be run only as a listener if you are + are running the real gmond process on the host. If you are + not running gmond, then the GangliaPlugIn should be configured + as both a listener and a sender. + </description> + <servlet-class>com.bigdata.rdf.sail.webapp.HALoadBalancerServlet</servlet-class> + <load-on-startup>1</load-on-startup> + <async-supported>true</async-supported> + <init-param> + <param-name>policy</param-name> + <param-value>com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy</param-value> + <description> + The load balancer policy. This must be an instance of the + IHALoadBalancerPolicy interface. A default policy (NOPLBSPolicy) is + used when no value is specified. + + The policies differ ONLY in how they handle READ requests. All policies + proxy updates to the leader. If you do not want update proxying, then + use a URL that does not address the HALoadBalancerServlet. + + The following policies are pre-defined: + + com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy: + + Does not load balance read requests. + + com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy: + + Round robin for read requests. + + com.bigdata.rdf.sail.webapp.lbs.policy.counters.CountersLBSPolicy: + + Load based proxying for read requests using the build in http + service for reporting performance counters. This policy requires + the PlatformStatsPlugIn and may also require platform specific + metrics collection dependencies, e.g., sysstat. + + com.bigdata.rdf.sail.webapp.lbs.policy.ganglia.GangliaLBSPolicy: + + Load based proxying for read requests using ganglia. This policy + requires the requires the PlatformStatsPlugIn. In addition, either + gmond must be installed on each node or the embedded GangliaService + must be enabled such that performance metrics are collected and + reported. + + Some of these policies can be further configured using additional + init-param elements that they understand. See the javadoc for the + individual policies for more information. 
+ </description> + </init-param> + </servlet> + <servlet-mapping> + <servlet-name>Load Balancer</servlet-name> + <url-pattern>/LBS/*</url-pattern> + </servlet-mapping> +</web-app> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml 2014-05-19 14:26:12 UTC (rev 8359) @@ -89,102 +89,8 @@ <description>Performance counters.</description> <servlet-class>com.bigdata.rdf.sail.webapp.CountersServlet</servlet-class> <async-supported>true</async-supported> - </servlet><!-- --> - <servlet> - <servlet-name>Load Balancer</servlet-name> - <description> - The HA Load Balancer servlet provides a transparent proxy for - requests arriving its configured URL pattern (the "external" - interface for the load balancer) to the root of the web - application. - - The use of the load balancer is entirely optional. If the - security rules permit, then clients MAY make requests directly - against a specific service. Thus, no specific provision exists - to disable the load balancer servlet, but you may choose not to - deploy it. - - When successfully deployed, requests having prefix corresponding to - the URL pattern for the load balancer are automatically redirected - to a joined service in the met quorum based on the configured load - balancer policy. - - Requests directed to /bigdata/LBS/leader are proxied to the quorum - leader - this URL must be used for non-idempotent requests - (updates). - - Requests directed to /bigdata/LBS/read are load balanced over the - services joined with the met quourm. This URL may only be used - with idempotent requests (reads). - - For non-HA deployments, requests are simply forwarded to the local - service after stripping off the /LBS/leader or /LBS/read prefix. - Thus, it is always safe to use the LBS request URLs. - - The load balancer policies are "HA aware." They will always - redirect update requests to the quorum leader. The default - polices will load balance read requests over the leader and - followers in a manner that reflects the CPU, IO Wait, and GC - Time associated with each service. The PlatformStatsPlugIn - and GangliaPlugIn MUST be enabled for the default load - balancer policy to operate. It depends on those plugins to - maintain a model of the load on the HA replication cluster. - The GangliaPlugIn should be run only as a listener if you are - are running the real gmond process on the host. If you are - not running gmond, then the GangliaPlugIn should be configured - as both a listener and a sender. - </description> - <servlet-class>com.bigdata.rdf.sail.webapp.HALoadBalancerServlet</servlet-class> - <load-on-startup>1</load-on-startup> - <async-supported>true</async-supported> - <init-param> - <param-name>policy</param-name> - <param-value>com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy</param-value> - <description> - The load balancer policy. This must be an instance of the - IHALoadBalancerPolicy interface. A default policy (NOPLBSPolicy) is - used when no value is specified. - - The policies differ ONLY in how they handle READ requests. All policies - proxy updates to the leader. If you do not want update proxying, then - use a URL that does not address the HALoadBalancerServlet. 
- - The following policies are pre-defined: - - com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy: - - Does not load balance read requests. - - com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy: - - Round robin for read requests. - - com.bigdata.rdf.sail.webapp.lbs.policy.counters.CountersLBSPolicy: - - Load based proxying for read requests using the build in http - service for reporting performance counters. This policy requires - the PlatformStatsPlugIn and may also require platform specific - metrics collection dependencies, e.g., sysstat. - - com.bigdata.rdf.sail.webapp.lbs.policy.ganglia.GangliaLBSPolicy: - - Load based proxying for read requests using ganglia. This policy - requires the requires the PlatformStatsPlugIn. In addition, either - gmond must be installed on each node or the embedded GangliaService - must be enabled such that performance metrics are collected and - reported. - - Some of these policies can be further configured using additional - init-param elements that they understand. See the javadoc for the - individual policies for more information. - </description> - </init-param> </servlet> - <servlet-mapping> - <servlet-name>Load Balancer</servlet-name> - <url-pattern>/LBS/*</url-pattern> - </servlet-mapping> - <!-- --> + <!-- Note: The HALoadBalancerServlet is deployed from override-web.xml --> <!-- Serve anything under /html/* as a simple file. --> <servlet-mapping> <servlet-name>default</servlet-name> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-19 14:26:12 UTC (rev 8359) @@ -142,14 +142,12 @@ <Arg> <!-- This is the bigdata web application. --> <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> - <Set name="war"> - <!-- The location of the top-level of the bigdata webapp. --> - <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" /> - </Set> - <Set name="contextPath">/bigdata</Set> + <Set name="war"><SystemProperty name="jetty.resourceBase" default="bigdata-war/src"/></Set> + <Set name="contextPath">/bigdata</Set> <Set name="descriptor">WEB-INF/web.xml</Set> <Set name="parentLoaderPriority">true</Set> <Set name="extractWAR">false</Set> + <Set name="overrideDescriptor"><SystemProperty name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set> </New> </Arg> </Call> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
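For readers who embed the NanoSparqlServer rather than launching it through jetty.xml, the same override-descriptor mechanism can be wired up programmatically. The sketch below only mirrors the WebAppContext settings shown in the jetty.xml hunk above; the jetty.resourceBase and jetty.overrideWebXml property names are taken from this commit, while the port, the fallback paths, and the class name EmbeddedNssSketch are illustrative assumptions, not part of the patch.

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.webapp.WebAppContext;

    /*
     * Minimal sketch: deploy the bigdata webapp from the base web.xml and
     * merge override-web.xml (which contributes the HALoadBalancerServlet)
     * on top via jetty's override descriptor.
     */
    public class EmbeddedNssSketch {

        public static void main(final String[] args) throws Exception {

            final Server server = new Server(9999); // hypothetical port

            final WebAppContext wac = new WebAppContext();
            wac.setWar(System.getProperty("jetty.resourceBase",
                    "bigdata-war/src"));
            wac.setContextPath("/bigdata");
            wac.setDescriptor("WEB-INF/web.xml");
            // Merge the HA LBS servlet definition over the base web.xml.
            wac.setOverrideDescriptor(System.getProperty("jetty.overrideWebXml",
                    "bigdata-war/src/WEB-INF/override-web.xml"));
            wac.setParentLoaderPriority(true);
            wac.setExtractWAR(false);

            server.setHandler(wac);
            server.start();
            server.join();
        }
    }

When the override descriptor is merged, requests under /bigdata/LBS/* are handled by the HALoadBalancerServlet as described in the servlet description above; when it is omitted (for example under tomcat), the webapp still deploys because the base web.xml no longer references the jetty-specific ProxyServlet.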
From: <tho...@us...> - 2014-05-18 15:49:13
Revision: 8358   http://sourceforge.net/p/bigdata/code/8358
Author:   thompsonbry
Date:     2014-05-18 15:49:11 +0000 (Sun, 18 May 2014)

Log Message:
-----------
updating java version to 1.7 for cluster build in build.properties. 1.7 is now required to compile the code base.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/build.properties

Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.properties	2014-05-18 15:48:10 UTC (rev 8357)
+++ branches/BIGDATA_RELEASE_1_3_0/build.properties	2014-05-18 15:49:11 UTC (rev 8358)
@@ -158,7 +158,7 @@
 # dumps with Sun jdk1.6.0_07 and FC6 on a 32-bit platform.
 #
 #JAVA_HOME=C:\\Program Files\\Java\\jdk1.6.0_10
-JAVA_HOME=/usr/java/jdk1.6.0_27
+JAVA_HOME=/usr/java/jdk1.7.0_25
 #JAVA_HOME=/usr/lib/jvm/java-1.6.0-openjdk-1.6.0.0.x86_64
 #JAVA_HOME=/usr/java/jrockit-R27.3.0-jdk1.6.0_01
From: <tho...@us...> - 2014-05-18 15:48:13
Revision: 8357   http://sourceforge.net/p/bigdata/code/8357
Author:   thompsonbry
Date:     2014-05-18 15:48:10 +0000 (Sun, 18 May 2014)

Log Message:
-----------
removed references to README-JINI and overview.html in the SVN root. These files were aged and have been removed. Their absence was breaking the cluster installer.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/build.xml

Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml	2014-05-18 14:55:58 UTC (rev 8356)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml	2014-05-18 15:48:10 UTC (rev 8357)
@@ -393,14 +393,16 @@
 	<!-- Note: the javadoc requires a LOT of RAM, but runs quickly
 	     on a server class machine.
 
-	     @todo man page for [bigdata] script to @{build.dir}/docs
+	     TODO man page for [bigdata] script to @{build.dir}/docs
+
+	     TODO: New overview page. Old one was very dated and has been removed.
+
+	     overview="${bigdata.dir}/overview.html"
 	-->
 	<target name="javadoc" depends="prepare" if="javadoc">
 		<mkdir dir="${build.dir}/docs/api" />
 		<javadoc destdir="${build.dir}/docs/api" defaultexcludes="yes"
 			author="true" version="true" use="true" verbose="no"
-			overview="${bigdata.dir}/overview.html"
 			windowtitle="bigdata® v${build.ver}" classpathref="build.classpath"
 			encoding="utf-8"
@@ -675,8 +677,6 @@
 		<!-- @todo cleanup LEGAL into one directory off the root in the src tree? -->
 		<fileset dir="${bigdata.dir}">
 			<include name="LICENSE.txt" />
-			<include name="overview.html" />
-			<include name="README-JINI" />
 			<include name="bigdata/LEGAL/*" />
 			<include name="bigdata-jini/LEGAL/*" />
 			<include name="bigdata-rdf/LEGAL/*" />
@@ -1474,8 +1474,6 @@
 		<copy tofile="${build.dir}/build.properties" file="build.properties" />
 		<copy tofile="${build.dir}/build.xml" file="build.xml" />
 		<copy tofile="${build.dir}/LICENSE.txt" file="LICENSE.txt" />
-		<copy tofile="${build.dir}/overview.html" file="overview.html" />
-		<copy tofile="${build.dir}/README-JINI" file="README-JINI" />
 		<copy toDir="${build.dir}/LEGAL" flatten="true">
 			<fileset dir="${bigdata.dir}">
 				<include name="**/LEGAL/*" />
@@ -1497,8 +1495,6 @@
 			<include name="build.xml" />
 			<include name="LICENSE.txt" />
 			<include name="NOTICE" />
-			<include name="overview.html" />
-			<include name="README-JINI" />
 			<include name="**/LEGAL/*" />
 
 			<include name="bigdata/src/**" />
@@ -1572,8 +1568,6 @@
 		<copy toDir="${install.doc.dir}">
 			<fileset dir="${bigdata.dir}">
 				<include name="LICENSE.txt" />
-				<include name="overview.html" />
-				<include name="README-JINI" />
 				<include name="bigdata/LEGAL/*" />
 				<include name="bigdata-jini/LEGAL/*" />
 				<include name="bigdata-rdf/LEGAL/*" />
From: <tho...@us...> - 2014-05-18 14:56:00
Revision: 8356   http://sourceforge.net/p/bigdata/code/8356
Author:   thompsonbry
Date:     2014-05-18 14:55:58 +0000 (Sun, 18 May 2014)

Log Message:
-----------
attempting workaround for SystemProperty vs Property

WARN : 634 2014-05-18 10:52:01,945 com.bigdata.journal.jini.ha.HAJournalServer org.eclipse.jetty.xml.XmlConfiguration$JettyXmlConfiguration.configure(XmlConfiguration.java:440): Config error at <Set name="minThreads" type="int"><SystemProperty name="jetty.threads.min" default="10"/></Set>

WARN : 634 2014-05-18 10:52:01,945 com.bigdata.journal.jini.ha.HAJournalServer org.eclipse.jetty.xml.XmlConfiguration$JettyXmlConfiguration.configure(XmlConfiguration.java:440): Config error at <Get name="ThreadPool"><Set name="minThreads" type="int"><SystemProperty name="jetty.threads.min" default="10"/></Set><Set name="maxThreads" type="int"><SystemProperty name="jetty.threads.max" default="64"/></Set><Set name="idleTimeout" type="int"><SystemProperty name="jetty.threads.timeout" default="60000"/></Set><Set name="detailedDump">false</Set></Get>

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml	2014-05-18 14:51:12 UTC (rev 8355)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml	2014-05-18 14:55:58 UTC (rev 8356)
@@ -23,9 +23,9 @@
     <!-- =========================================================== -->
     <Arg name="threadpool"><New id="threadpool" class="org.eclipse.jetty.util.thread.QueuedThreadPool"/></Arg>
     <Get name="ThreadPool">
-      <Set name="minThreads" type="int"><SystemProperty name="jetty.threads.min" default="10"/></Set>
-      <Set name="maxThreads" type="int"><SystemProperty name="jetty.threads.max" default="64"/></Set>
-      <Set name="idleTimeout" type="int"><SystemProperty name="jetty.threads.timeout" default="60000"/></Set>
+      <Set name="minThreads" type="int"><Property name="jetty.threads.min" default="10"/></Set>
+      <Set name="maxThreads" type="int"><Property name="jetty.threads.max" default="64"/></Set>
+      <Set name="idleTimeout" type="int"><Property name="jetty.threads.timeout" default="60000"/></Set>
      <Set name="detailedDump">false</Set>
     </Get>
From: <tho...@us...> - 2014-05-18 14:51:19
Revision: 8355   http://sourceforge.net/p/bigdata/code/8355
Author:   thompsonbry
Date:     2014-05-18 14:51:12 +0000 (Sun, 18 May 2014)

Log Message:
-----------
bug fix to startHAServices (quote in wrong place)

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices	2014-05-18 14:31:31 UTC (rev 8354)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices	2014-05-18 14:51:12 UTC (rev 8355)
@@ -75,7 +75,7 @@
         "-Djetty.port=${JETTY_PORT}"\
         "-Djetty.threads.min=${JETTY_THREADS_MIN}"\
         "-Djetty.threads.max=${JETTY_THREADS_MAX}"\
-        "-Djetty.threads.timeout=${JETTY_THREADS_TIMEOUT}\"
+        "-Djetty.threads.timeout=${JETTY_THREADS_TIMEOUT}"\
         "-Djetty.resourceBase=${JETTY_RESOURCE_BASE}"\
         "-DJETTY_XML=${JETTY_XML}"\
         -DCOLLECT_QUEUE_STATISTICS=${COLLECT_QUEUE_STATISTICS}\
From: <tho...@us...> - 2014-05-18 14:31:38
Revision: 8354 http://sourceforge.net/p/bigdata/code/8354 Author: thompsonbry Date: 2014-05-18 14:31:31 +0000 (Sun, 18 May 2014) Log Message: ----------- updated release notes for 1.3.1 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_1.txt Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_1.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_1.txt 2014-05-18 13:35:40 UTC (rev 8353) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_1.txt 2014-05-18 14:31:31 UTC (rev 8354) @@ -16,16 +16,17 @@ You can checkout this release from: -https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_3_1 +https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_1 New features: +- Java 7 is now required. - High availability [10]. -- HA Load balancer. -- New workbench. +- High availability load balancer. +- New RDF/SPARQL workbench. - Blueprints API. +- RDF Graph Mining Service (GASService) [12]. - Reification Done Right (RDR) support [11]. -- RDF Graph Mining Service (GASService) [12]. - Property Path performance enhancements. - Plus numerous other bug fixes and performance enhancements. @@ -54,7 +55,81 @@ 1.3.1: -TBD +- http://trac.bigdata.com/ticket/242 (Deadlines do not play well with GROUP_BY, ORDER_BY, etc.) +- http://trac.bigdata.com/ticket/256 (Amortize RTO cost) +- http://trac.bigdata.com/ticket/257 (Support BOP fragments in the RTO.) +- http://trac.bigdata.com/ticket/258 (Integrate RTO into SAIL) +- http://trac.bigdata.com/ticket/259 (Dynamically increase RTO sampling limit.) +- http://trac.bigdata.com/ticket/526 (Reification done right) +- http://trac.bigdata.com/ticket/580 (Problem with the bigdata RDF/XML parser with sids) +- http://trac.bigdata.com/ticket/622 (NSS using jetty+windows can lose connections (windows only; jdk 6/7 bug)) +- http://trac.bigdata.com/ticket/624 (HA Load Balancer) +- http://trac.bigdata.com/ticket/629 (Graph processing API) +- http://trac.bigdata.com/ticket/721 (Support HA1 configurations) +- http://trac.bigdata.com/ticket/730 (Allow configuration of embedded NSS jetty server using jetty-web.xml) +- http://trac.bigdata.com/ticket/759 (multiple filters interfere) +- http://trac.bigdata.com/ticket/763 (Stochastic results with Analytic Query Mode) +- http://trac.bigdata.com/ticket/774 (Converge on Java 7.) +- http://trac.bigdata.com/ticket/779 (Resynchronization of socket level write replication protocol (HA)) +- http://trac.bigdata.com/ticket/780 (Incremental or asynchronous purge of HALog files) +- http://trac.bigdata.com/ticket/782 (Wrong serialization version) +- http://trac.bigdata.com/ticket/784 (Describe Limit/offset don't work as expected) +- http://trac.bigdata.com/ticket/787 (Update documentations and samples, they are OUTDATED) +- http://trac.bigdata.com/ticket/788 (Name2Addr does not report all root causes if the commit fails.) 
+- http://trac.bigdata.com/ticket/789 (ant task to build sesame fails, docs for setting up bigdata for sesame are ancient) +- http://trac.bigdata.com/ticket/790 (should not be pruning any children) +- http://trac.bigdata.com/ticket/791 (Clean up query hints) +- http://trac.bigdata.com/ticket/793 (Explain reports incorrect value for opCount) +- http://trac.bigdata.com/ticket/796 (Filter assigned to sub-query by query generator is dropped from evaluation) +- http://trac.bigdata.com/ticket/797 (add sbt setup to getting started wiki) +- http://trac.bigdata.com/ticket/798 (Solution order not always preserved) +- http://trac.bigdata.com/ticket/799 (mis-optimation of quad pattern vs triple pattern) +- http://trac.bigdata.com/ticket/802 (Optimize DatatypeFactory instantiation in DateTimeExtension) +- http://trac.bigdata.com/ticket/803 (prefixMatch does not work in full text search) +- http://trac.bigdata.com/ticket/804 (update bug deleting quads) +- http://trac.bigdata.com/ticket/806 (Incorrect AST generated for OPTIONAL { SELECT }) +- http://trac.bigdata.com/ticket/808 (Wildcard search in bigdata for type suggessions) +- http://trac.bigdata.com/ticket/810 (Expose GAS API as SPARQL SERVICE) +- http://trac.bigdata.com/ticket/815 (RDR query does too much work) +- http://trac.bigdata.com/ticket/816 (Wildcard projection ignores variables inside a SERVICE call.) +- http://trac.bigdata.com/ticket/817 (Unexplained increase in journal size) +- http://trac.bigdata.com/ticket/821 (Reject large files, rather then storing them in a hidden variable) +- http://trac.bigdata.com/ticket/831 (UNION with filter issue) +- http://trac.bigdata.com/ticket/841 (Using "VALUES" in a query returns lexical error) +- http://trac.bigdata.com/ticket/848 (Fix SPARQL Results JSON writer to write the RDR syntax) +- http://trac.bigdata.com/ticket/849 (Create writers that support the RDR syntax) +- http://trac.bigdata.com/ticket/851 (RDR GAS interface) +- http://trac.bigdata.com/ticket/852 (RemoteRepository.cancel() does not consume the HTTP response entity.) +- http://trac.bigdata.com/ticket/853 (Follower does not accept POST of idempotent operations (HA)) +- http://trac.bigdata.com/ticket/854 (Allow override of maximum length before converting an HTTP GET to an HTTP POST) +- http://trac.bigdata.com/ticket/855 (AssertionError: Child does not have persistent identity) +- http://trac.bigdata.com/ticket/862 (Create parser for JSON SPARQL Results) +- http://trac.bigdata.com/ticket/863 (HA1 commit failure) +- http://trac.bigdata.com/ticket/866 (Batch remove API for the SAIL) +- http://trac.bigdata.com/ticket/867 (NSS concurrency problem with list namespaces and create namespace) +- http://trac.bigdata.com/ticket/869 (HA5 test suite) +- http://trac.bigdata.com/ticket/872 (Full text index range count optimization) +- http://trac.bigdata.com/ticket/874 (FILTER not applied when there is UNION in the same join group) +- http://trac.bigdata.com/ticket/876 (When I upload a file I want to see the filename.) +- http://trac.bigdata.com/ticket/877 (RDF Format selector is invisible) +- http://trac.bigdata.com/ticket/883 (CANCEL Query fails on non-default kb namespace on HA follower.) +- http://trac.bigdata.com/ticket/886 (Provide workaround for bad reverse DNS setups.) +- http://trac.bigdata.com/ticket/887 (BIND is leaving a variable unbound) +- http://trac.bigdata.com/ticket/892 (HAJournalServer does not die if zookeeper is not running) +- http://trac.bigdata.com/ticket/893 (large sparql insert optimization slow?) 
+- http://trac.bigdata.com/ticket/894 (unnecessary synchronization) +- http://trac.bigdata.com/ticket/895 (stack overflow in populateStatsMap) +- http://trac.bigdata.com/ticket/902 (Update Basic Bigdata Chef Cookbook) +- http://trac.bigdata.com/ticket/904 (AssertionError: PropertyPathNode got to ASTJoinOrderByType.optimizeJoinGroup) +- http://trac.bigdata.com/ticket/905 (unsound combo query optimization: union + filter) +- http://trac.bigdata.com/ticket/906 (DC Prefix Button Appends "</li>") +- http://trac.bigdata.com/ticket/907 (Add a quick-start ant task for the BD Server "ant start") +- http://trac.bigdata.com/ticket/912 (Provide a configurable IAnalyzerFactory) +- http://trac.bigdata.com/ticket/913 (Blueprints API Implementation) +- http://trac.bigdata.com/ticket/914 (Settable timeout on SPARQL Query (REST API)) +- http://trac.bigdata.com/ticket/915 (DefaultAnalyzerFactory issues) +- http://trac.bigdata.com/ticket/920 (Content negotiation orders accept header scores in reverse) +- http://trac.bigdata.com/ticket/939 (NSS does not start from command line: bigdata-war/src not found.) 1.3.0: This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-05-18 13:35:44
Revision: 8353 http://sourceforge.net/p/bigdata/code/8353 Author: thompsonbry Date: 2014-05-18 13:35:40 +0000 (Sun, 18 May 2014) Log Message: ----------- Added test for #887 - ticket is closed. Problem can not be demonstrated against the current code base. Suspect was fixed for the 1.3.0 release (heisenbug). Javadoc update for jetty.resourceBase for the NSS. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2014-05-17 17:16:32 UTC (rev 8352) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2014-05-18 13:35:40 UTC (rev 8353) @@ -128,6 +128,7 @@ // Test suite for SPARQL 1.1 BINDINGS clause suite.addTestSuite(TestBindings.class); suite.addTestSuite(TestBindHeisenbug708.class); + suite.addTestSuite(TestTicket887.class); // Complex queries. suite.addTestSuite(TestComplexQuery.class); Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java 2014-05-18 13:35:40 UTC (rev 8353) @@ -0,0 +1,78 @@ +/** + +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sparql.ast.eval; + + +/** + * Test suite for a hesienbug involving BIND. Unlike the other issues this + * sometimes happens, and is sometimes OK, so we run the test in a loop 20 + * times. 
+ * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/708"> + * Heisenbug </a> + * + * @version $Id$ + */ +public class TestTicket887 extends AbstractDataDrivenSPARQLTestCase { + + public TestTicket887() { + } + + public TestTicket887(String name) { + super(name); + } + + /** + * <pre> + * SELECT * + * WHERE { + * + * GRAPH ?g { + * + * BIND( "hello" as ?hello ) . + * BIND( CONCAT(?hello, " world") as ?helloWorld ) . + * + * ?member a ?class . + * + * } + * + * } + * LIMIT 1 + * </pre> + * + * @see <a href="http://trac.bigdata.com/ticket/887" > BIND is leaving a + * variable unbound </a> + */ + public void test_ticket_887_bind() throws Exception { + + new TestHelper( + "ticket_887_bind", // testURI, + "ticket_887_bind.rq",// queryFileURL + "ticket_887_bind.trig",// dataFileURL + "ticket_887_bind.srx"// resultFileURL + ).runTest(); + + } + +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq 2014-05-18 13:35:40 UTC (rev 8353) @@ -0,0 +1,14 @@ +SELECT * +WHERE { + + GRAPH ?g { + + BIND( "hello" as ?hello ) . + BIND( CONCAT(?hello, " world") as ?helloWorld ) . + + ?member a ?class . + + } + +} +LIMIT 1 \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx 2014-05-18 13:35:40 UTC (rev 8353) @@ -0,0 +1,32 @@ +<?xml version="1.0"?> +<sparql + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" + xmlns="http://www.w3.org/2005/sparql-results#" > + <head> + <variable name="?hello"/> + <variable name="?helloWorld"/> + <variable name="?member"/> + <variable name="?class"/> + <variable name="?g"/> + </head> + <results> + <result> + <binding name="hello"> + <literal>hello</literal> + </binding> + <binding name="helloWorld"> + <literal>hello world</literal> + </binding> + <binding name="member"> + <uri>http://www.bigdata.com/member</uri> + </binding> + <binding name="class"> + <uri>http://www.bigdata.com/cls</uri> + </binding> + <binding name="g"> + <uri>http://www.bigdata.com/</uri> + </binding> + </result> + </results> +</sparql> \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig 2014-05-18 13:35:40 UTC (rev 8353) @@ -0,0 +1,6 @@ +@prefix : <http://www.bigdata.com/> . +@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . 
+ +: { + :member a :cls +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 17:16:32 UTC (rev 8352) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-18 13:35:40 UTC (rev 8353) @@ -152,25 +152,39 @@ * environment variable will be used to locate the web application.</li> * <li> * <code>jetty.resourceBase</code> is not specified (either - * <code>null</code> or whitespace). An attempt is made to locate the - * <code>bigdata-war/src</code> resource in the file system (relative to - * the current working directory). If found, the - * <code>jetty.resourceBase</code> environment variable is set to this - * resource using a <code>file:</code> style URL. This will cause jetty - * to use the web application directory in the file system. - * <p> - * If the resource is not found in the file system, then an attempt is - * made to locate that resource using the classpath. If found, the the - * <code>jetty.resourceBase</code> is set to the URL for the located - * resource. This will cause jetty to use the web application resource - * on the classpath. If there are multiple such resources on the - * classpath, the first such resource will be discovered and used.</li> + * <code>null</code> or whitespace). + * <ol> + * <li>An attempt is made to locate the <code>bigdata-war/src</code> + * resource in the file system (relative to the current working + * directory). If found, the <code>jetty.resourceBase</code> environment + * variable is set to this resource using a <code>file:</code> style + * URL. This will cause jetty to use the web application directory in + * the file system.</li> * <li> + * An attempt is made to locate the resource + * <code>/WEB-INF/web.xml</code> using the classpath (this handles the + * case when running under the eclipse IDE). If found, the the + * <code>jetty.resourceBase</code> is set to the URL formed by removing + * the trailing <code>WEB-INF/web.xml</code> for the located resource. + * This will cause jetty to use the web application resource on the + * classpath. If there are multiple such resources on the classpath, the + * first such resource will be discovered and used.</li> + * <li>An attempt is made to locate the resource + * <code>bigdata-war/src/WEB-INF/web.xml</code> using the classpath + * (this handles the case when running from the command line using a + * bigdata JAR). If found, the the <code>jetty.resourceBase</code> is + * set to the URL formed by the trailing <code>WEB-INF/web.xml</code> + * for the located resource. This will cause jetty to use the web + * application resource on the classpath. If there are multiple such + * resources on the classpath, the first such resource will be + * discovered and used.</li> + * <li> * Otherwise, the <code>jetty.resourceBase</code> environment variable * is not modified and the default location specified in the * <code>jetty.xml</code> file will be used. 
If jetty is unable to * resolve that resource, then the web application will not start.</li> * </ol> + * </ol> * * @see <a href="http://trac.bigdata.com/ticket/939" > NSS does not * start from command line: bigdata-war/src not found </a> @@ -825,7 +839,7 @@ } if (tmp != null) { if (src != null) { - if (log.isInfoEnabled()) + if(log.isInfoEnabled()) log.info("Found: src=" + src + ", url=" + tmp); } final String s = tmp.toExternalForm(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-05-17 17:16:36
Revision: 8352 http://sourceforge.net/p/bigdata/code/8352 Author: thompsonbry Date: 2014-05-17 17:16:32 +0000 (Sat, 17 May 2014) Log Message: ----------- Further modifications to the NSS to start correctly with the embedded web app found along the classpath. This has been tested locally. We actually need to check different locations for eclipse and a deployed instance using a jar. See #939 (NSS class path) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 16:08:21 UTC (rev 8351) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 17:16:32 UTC (rev 8352) @@ -176,7 +176,7 @@ * start from command line: bigdata-war/src not found </a> */ String JETTY_RESOURCE_BASE = "jetty.resourceBase"; - + } /** @@ -767,20 +767,74 @@ * jetty.resourceBase not declared in the environment. */ - // default location: TODO To DEFAULT_JETTY_RESOURCE_BASE - resourceBaseStr = "./bigdata-war/src"; + // The default location to check in the file system. + final File file = new File("bigdata-war/src"); final URL resourceBaseURL; - if (new File(resourceBaseStr).exists()) { + if (file.exists()) { // Check the file system. - resourceBaseURL = new URL("file:" + resourceBaseStr); + resourceBaseURL = new URL("file:" + file.getAbsolutePath()); isFile = true; } else { - // Check the classpath. - resourceBaseURL = classLoader.getResource(resourceBaseStr); + /* + * Check the classpath. + * + * Note: When checking the classpath we need to test different + * resources depending on whether we are running under the + * eclipse IDE or at the command line! + */ + URL tmp = null; + String src = null; + if (tmp == null) { + /** + * Eclipse IDE class path. + * + * Note: This is what gets found when running under eclipse. + * The URL will be in the configured build directory for the + * eclipse project. So, something like: + * + * <pre> + * file:/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_NEW_SVN/bin/WEB-INF/web.xml + * </pre> + */ + tmp = classLoader.getResource(src = "/WEB-INF/web.xml"); + } +// if (tmp == null)// Eclipse IDE class path (system class loader). +// tmp = ClassLoader.getSystemClassLoader().getResource( +// src = "WEB-INF/web.xml"); +// if (tmp == null) +// tmp = classLoader // JAR class path. +// .getResource(src = "/bigdata-war/src/WEB-INF/web.xml"); + if (tmp == null) { + /** + * JAR class path (system class loader). + * + * Note: This is what gets located when we run from the + * command line (outside of eclipse). 
The resulting JAR URL + * will be something like: + * + * <pre> + * jar:file:/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_NEW_SVN/ant-build/lib/bigdata-1.3.0-20140517.jar!/bigdata-war/src/WEB-INF/web.xml + * </pre> + */ + tmp = ClassLoader.getSystemClassLoader().getResource( + src = "bigdata-war/src/WEB-INF/web.xml"); + } + if (tmp != null) { + if (src != null) { + if (log.isInfoEnabled()) + log.info("Found: src=" + src + ", url=" + tmp); + } + final String s = tmp.toExternalForm(); + final int endIndex = s.lastIndexOf("WEB-INF/web.xml"); + final String t = s.substring(0, endIndex); + resourceBaseURL = new URL(t); + } else { + resourceBaseURL = null; + } isClassPath = resourceBaseURL != null; } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java 2014-05-17 16:08:21 UTC (rev 8351) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java 2014-05-17 17:16:32 UTC (rev 8352) @@ -615,6 +615,9 @@ // Setup test suite final Test test = createTestSuite(null/* name */, requestURI); + System.out.println("Running health check: Request-URI=" + + requestURI); + // Run the test suite. test.run(result); @@ -623,7 +626,8 @@ } final String msg = "nerrors=" + result.errorCount() + ", nfailures=" - + result.failureCount() + ", nrun=" + result.runCount(); + + result.failureCount() + ", nrun=" + result.runCount() + + " : Request-URI=" + requestURI; System.out.println(msg); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
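Revisions 8351/8352 make the server search first the local file system and then the classpath for the webapp. If that automatic discovery is not wanted, the javadoc added in r8351 (below) notes that jetty.resourceBase can instead be pointed directly at the webapp packaged inside the bigdata JAR using a jar: style URL. The following sketch only illustrates checking that such a URL actually resolves before handing it to the server; the JAR path and the class name ResourceBaseCheck are hypothetical, not part of these commits.

    import java.io.InputStream;
    import java.net.URL;

    /*
     * Sketch: explicitly override the webapp location instead of relying on
     * the automatic discovery added in r8351/r8352. The JAR path below is a
     * placeholder -- substitute the absolute path of the deployed bigdata jar.
     */
    public class ResourceBaseCheck {

        public static void main(final String[] args) throws Exception {

            final String resourceBase =
                    "jar:file:/opt/bigdata/lib/bigdata-1.3.0.jar!/bigdata-war/src";

            // Fail fast if jetty would not be able to see web.xml at that location.
            try (InputStream in = new URL(resourceBase + "/WEB-INF/web.xml")
                    .openStream()) {
                System.out.println("web.xml is resolvable under " + resourceBase);
            }

            // Hand the verified location to jetty.xml / NanoSparqlServer.
            System.setProperty("jetty.resourceBase", resourceBase);
        }
    }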
From: <tho...@us...> - 2014-05-17 16:08:26
Revision: 8351 http://sourceforge.net/p/bigdata/code/8351 Author: thompsonbry Date: 2014-05-17 16:08:21 +0000 (Sat, 17 May 2014) Log Message: ----------- Working on #939 (NSS does not start from command line: bigdata-war/src not found). NanoSparqlServer: code has been modified to explicitly search (if jetty.resourceBase is not defined) (a) the local file system; (b) the classpath; and then (c) default to whatever is the default value in jetty.xml for the jetty.resourceBase property. TestNSSHealthCheck: added a basic test suite for checking the health of an NSS instance once deployed. This is a starting point for CI based tests of the various deployment models. build.xml: modified to illustrate a possible way of performing the CI deployment tests. More needs to be done here! Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml branches/BIGDATA_RELEASE_1_3_0/build.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 16:04:33 UTC (rev 8350) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 16:08:21 UTC (rev 8351) @@ -25,6 +25,7 @@ import java.io.File; import java.io.InputStream; +import java.net.MalformedURLException; import java.net.URL; import java.util.LinkedHashMap; import java.util.Map; @@ -122,6 +123,60 @@ */ String JETTY_DUMP_START = "jetty.dump.start"; + /** + * This property specifies the resource path for the web application. In + * order for this mechanism to work, the <code>jetty.xml</code> file + * MUST contain a line which allows the resourceBase of the web + * application to be set from an environment variable. For example: + * + * <pre> + * <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" /> + * </pre> + * + * The <code>jetty.resourceBase</code> variable may identify either a + * file or a resource on the class path. To force the use of the web + * application embedded within the <code>bigdata.jar</code> you need to + * specify a JAR URL along the following lines (using the appropriate + * file path and jar name and version: + * + * <pre> + * jar:file:../lib/bigdata-1.3.0.jar!/bigdata-war/src + * </pre> + * + * The use of absolute file paths are recommended for reliable + * resolution. + * <p> + * The order of preference is: + * <ol> + * <li><code>jetty.resourceBase</code> is specified. The value of this + * environment variable will be used to locate the web application.</li> + * <li> + * <code>jetty.resourceBase</code> is not specified (either + * <code>null</code> or whitespace). An attempt is made to locate the + * <code>bigdata-war/src</code> resource in the file system (relative to + * the current working directory). If found, the + * <code>jetty.resourceBase</code> environment variable is set to this + * resource using a <code>file:</code> style URL. This will cause jetty + * to use the web application directory in the file system. 
+ * <p> + * If the resource is not found in the file system, then an attempt is + * made to locate that resource using the classpath. If found, the the + * <code>jetty.resourceBase</code> is set to the URL for the located + * resource. This will cause jetty to use the web application resource + * on the classpath. If there are multiple such resources on the + * classpath, the first such resource will be discovered and used.</li> + * <li> + * Otherwise, the <code>jetty.resourceBase</code> environment variable + * is not modified and the default location specified in the + * <code>jetty.xml</code> file will be used. If jetty is unable to + * resolve that resource, then the web application will not start.</li> + * </ol> + * + * @see <a href="http://trac.bigdata.com/ticket/939" > NSS does not + * start from command line: bigdata-war/src not found </a> + */ + String JETTY_RESOURCE_BASE = "jetty.resourceBase"; + } /** @@ -163,7 +218,7 @@ * <dt>-jettyXml</dt> * <dd>The location of the jetty.xml resource that will be used * to start the {@link Server} (default is the file in the JAR). - * * The default will locate the <code>jetty.xml</code> resource + * The default will locate the <code>jetty.xml</code> resource * that is bundled with the JAR. This preserves the historical * behavior. If you want to use a different * <code>jetty.xml</code> file, just override this property on @@ -216,7 +271,11 @@ * use a different jetty.xml file, just override this property on the * command line. */ - String jettyXml = "bigdata-war/src/jetty.xml"; + String jettyXml = System.getProperty(// + SystemProperties.JETTY_XML,// + "bigdata-war/src/jetty.xml"// +// SystemProperties.DEFAULT_JETTY_XML + ); /* * Handle all arguments starting with "-". These should appear before @@ -589,45 +648,26 @@ /* * Configure the jetty Server using a jetty.xml file. In turn, the * jetty.xml file configures the webapp using a web.xml file. The caller - * can override the location of the jetty.xml file if they need to - * change the way in which either jetty or the webapp are configured. - * You can also override many of the properties in the jetty.xml file - * using environment variables. + * can override the location of the jetty.xml file using the [jetty.xml] + * environment variable if they need to change the way in which either + * jetty or the webapp are configured. You can also override many of the + * properties in the [jetty.xml] file using environment variables. For + * example, they can also override the location of the web application + * (including the web.xml file) using the [jetty.resourceBase] + * environment variable. */ final Server server; { - // Locate jetty.xml. - final URL jettyXmlUrl; - if (new File(jettyXml).exists()) { + // Find the effective jetty.xml URL. + final URL jettyXmlURL = getEffectiveJettyXmlURL(classLoader, + jettyXml); - // Check the file system. -// jettyXmlUrl = new File(jettyXml).toURI(); - jettyXmlUrl = new URL("file:" + jettyXml); - - } else { - - // Check the classpath. - jettyXmlUrl = classLoader.getResource(jettyXml); -// jettyXmlUrl = classLoader.getResource("bigdata-war/src/jetty.xml"); - - } - - if (jettyXmlUrl == null) { - - throw new RuntimeException("Not found: " + jettyXml); - - } - - if (log.isInfoEnabled()) - log.info("jetty configuration: jettyXml=" + jettyXml - + ", jettyXmlUrl=" + jettyXmlUrl); - - // Build configuration from that resource. + // Build the server configuration from that jetty.xml resource. final XmlConfiguration configuration; { // Open jetty.xml resource. 
- final Resource jettyConfig = Resource.newResource(jettyXmlUrl); + final Resource jettyConfig = Resource.newResource(jettyXmlURL); InputStream is = null; try { is = jettyConfig.getInputStream(); @@ -639,65 +679,208 @@ } } } - + + // Configure/apply jetty.resourceBase overrides. + configureEffectiveResourceBase(classLoader); + // Configure the jetty server. server = (Server) configuration.configure(); } /* - * Configure the webapp (overrides, IIndexManager, etc.) + * Configure any overrides for the web application init-params. */ - { + configureWebAppOverrides(server, indexManager, initParams); - final WebAppContext wac = getWebApp(server); + return server; + + } - if (wac == null) { + private static URL getEffectiveJettyXmlURL(final ClassLoader classLoader, + final String jettyXml) throws MalformedURLException { - /* - * This is a fatal error. If we can not set the IIndexManager, - * the NSS will try to interpret the propertyFile in web.xml - * rather than using the one that is already open and specified - * by the caller. Among other things, that breaks the - * HAJournalServer startup. - */ + // Locate jetty.xml. + final URL jettyXmlUrl; + boolean isFile = false; + boolean isClassPath = false; + if (new File(jettyXml).exists()) { - throw new RuntimeException("Could not locate " - + WebAppContext.class.getName()); + // Check the file system. + // jettyXmlUrl = new File(jettyXml).toURI(); + jettyXmlUrl = new URL("file:" + jettyXml); + isFile = true; - } + } else { + // Check the classpath. + jettyXmlUrl = classLoader.getResource(jettyXml); + // jettyXmlUrl = + // classLoader.getResource("bigdata-war/src/jetty.xml"); + isClassPath = true; + + } + + if (jettyXmlUrl == null) { + + throw new RuntimeException("Not found: " + jettyXml); + + } + + if (log.isInfoEnabled()) + log.info("jetty configuration: jettyXml=" + jettyXml + ", isFile=" + + isFile + ", isClassPath=" + isClassPath + + ", jettyXmlUrl=" + jettyXmlUrl); + + return jettyXmlUrl; + + } + + /** + * Search (a) the local file system; and (b) the classpath for the web + * application. If the resource is located, then set the + * [jetty.resourceBase] property. This search sequence gives preference to + * the local file system and then searches the classpath (which jetty does + * not known how to do by itself.) + * + * @throws MalformedURLException + * + * @see <a href="http://trac.bigdata.com/ticket/939" > NSS does not start + * from command line: bigdata-war/src not found </a> + */ + private static void configureEffectiveResourceBase( + final ClassLoader classLoader) throws MalformedURLException { + + // Check the environment variable. + String resourceBaseStr = System + .getProperty(SystemProperties.JETTY_RESOURCE_BASE); + + // true iff declared as an environment variable. + final boolean isDeclared = resourceBaseStr != null + && resourceBaseStr.trim().length() > 0; + boolean isFile = false; // iff found in local file system. + boolean isClassPath = false; // iff found on classpath. + + if (!isDeclared) { + /* - * Force the use of the caller's IIndexManager. This is how we get the - * NSS to use the already open Journal for the HAJournalServer. + * jetty.resourceBase not declared in the environment. */ - if (indexManager != null) { - // Set the IIndexManager attribute on the WebAppContext. 
- wac.setAttribute(IIndexManager.class.getName(), indexManager); - + // default location: TODO To DEFAULT_JETTY_RESOURCE_BASE + resourceBaseStr = "./bigdata-war/src"; + + final URL resourceBaseURL; + if (new File(resourceBaseStr).exists()) { + + // Check the file system. + resourceBaseURL = new URL("file:" + resourceBaseStr); + isFile = true; + + } else { + + // Check the classpath. + resourceBaseURL = classLoader.getResource(resourceBaseStr); + isClassPath = resourceBaseURL != null; + } - - /* - * Note: You simply can not override the init parameters specified - * in web.xml. Therefore, this sets the overrides on an attribute. - * The attribute is then consulted when the web app starts and its - * the override values are used if given. - */ - if (initParams != null) { - wac.setAttribute( - BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES, - initParams); + if (resourceBaseURL != null) { + /* + * We found the resource either in the file system or in the + * classpath. + * + * Explicitly set the discovered value on the jetty.resourceBase + * property. This will cause jetty to use the version of that + * resource that we discovered above. + * + * Note: If we did not find the resource, then the default value + * from the jetty.xml SystemProperty expression will be used by + * jetty. If it can not find a resource using that default + * value, then the startup will fail. We leave this final check + * to jetty itself since it will interpret the jetty.xml file + * itself. + */ + System.setProperty(SystemProperties.JETTY_RESOURCE_BASE, + resourceBaseURL.toExternalForm()); + } } - return server; + if (log.isInfoEnabled()) + log.info("jetty configuration"// + + ": resourceBaseStr=" + resourceBaseStr + + ", isDeclared=" + + isDeclared + ", isFile=" + isFile + + ", isClassPath=" + + isClassPath + + ", jetty.resourceBase(effective)=" + + System.getProperty(SystemProperties.JETTY_RESOURCE_BASE)); } + + /** + * Configure the webapp (overrides, IIndexManager, etc.) + * <p> + * Note: These overrides are achieved by setting the {@link WebAppContext} + * attribute named + * {@link BigdataRDFServletContextListener#INIT_PARAM_OVERRIDES}. The + * {@link BigdataRDFServletContextListener} then consults the attribute when + * reporting the effective value of the init-params. This convoluted + * mechanism is required because you can not otherwise override the + * init-params without editing <code>web.xml</code>. + */ + private static void configureWebAppOverrides(// + final Server server,// + final IIndexManager indexManager,// + final Map<String, String> initParams// + ) { + final WebAppContext wac = getWebApp(server); + + if (wac == null) { + + /* + * This is a fatal error. If we can not set the IIndexManager, the + * NSS will try to interpret the propertyFile in web.xml rather than + * using the one that is already open and specified by the caller. + * Among other things, that breaks the HAJournalServer startup. + */ + + throw new RuntimeException("Could not locate " + + WebAppContext.class.getName()); + + } + + /* + * Force the use of the caller's IIndexManager. This is how we get the + * NSS to use the already open Journal for the HAJournalServer. + */ + if (indexManager != null) { + + // Set the IIndexManager attribute on the WebAppContext. + wac.setAttribute(IIndexManager.class.getName(), indexManager); + + } + + /* + * Note: You simply can not override the init parameters specified in + * web.xml. Therefore, this sets the overrides on an attribute. 
The + * attribute is then consulted when the web app starts and its the + * override values are used if given. + */ + if (initParams != null) { + + wac.setAttribute( + BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES, + initParams); + + } + + } + /** * Return the {@link WebAppContext} for the {@link Server}. * Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java 2014-05-17 16:08:21 UTC (rev 8351) @@ -0,0 +1,642 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sail.webapp.health; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import junit.framework.AssertionFailedError; +import junit.framework.Test; +import junit.framework.TestCase2; +import junit.framework.TestListener; +import junit.framework.TestResult; +import junit.framework.TestSuite; +import junit.textui.ResultPrinter; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpResponse; +import org.apache.http.client.HttpClient; +import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.conn.ClientConnectionManager; +import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.impl.client.DefaultRedirectStrategy; +import org.apache.http.util.EntityUtils; + +import com.bigdata.BigdataStatics; +import com.bigdata.rdf.sail.webapp.NanoSparqlServer; +import com.bigdata.rdf.sail.webapp.client.ConnectOptions; +import com.bigdata.rdf.sail.webapp.client.DefaultClientConnectionManagerFactory; +import com.bigdata.rdf.sail.webapp.client.HttpException; +import com.bigdata.rdf.sail.webapp.client.RemoteRepository; +import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager; +import com.bigdata.util.concurrent.DaemonThreadFactory; + +/** + * Utility test suite provides a health check for a deployed instance. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class TestNSSHealthCheck extends TestCase2 { + + /** + * A marker placed into index.html so we can recognize when that page is + * served. + */ + private static final String JUNIT_TEST_MARKER_INDEX_HTML = "junit test marker: index.html"; + + /** + * The executor used by the http client. 
+ */ + private ExecutorService executorService; + + /** + * The {@link ClientConnectionManager} for the {@link HttpClient} used by + * the {@link RemoteRepository}. This is used when we tear down the + * {@link RemoteRepository}. + */ + private ClientConnectionManager m_cm; + + /** + * Exposed to tests that do direct HTTP GET/POST operations. + */ + protected HttpClient m_httpClient; + + /** + * The client-API wrapper to the NSS. + */ + protected RemoteRepositoryManager m_repo; + + /** + * The effective {@link NanoSparqlServer} http end point (including the + * ContextPath). + * <pre> + * http://localhost:8080/bigdata -- webapp URL (includes "/bigdata" context path. + * </pre> + */ + protected String m_serviceURL; + + /** + * The URL of the root of the web application server. This does NOT include + * the ContextPath for the webapp. + * + * <pre> + * http://localhost:8080 -- root URL + * </pre> + */ + protected String m_rootURL; + + public TestNSSHealthCheck(final String name) {//, final String requestURI) { + + super(name); + +// m_requestURI = requestURI; + + } + + /** + * FIXME hacked in test suite constructor. + */ + private static String requestURI; + + @Override + protected void setUp() throws Exception { + + super.setUp(); + + m_rootURL = requestURI; + + m_serviceURL = m_rootURL + BigdataStatics.getContextPath(); + + m_cm = DefaultClientConnectionManagerFactory.getInstance() + .newInstance(); + + final DefaultHttpClient httpClient = new DefaultHttpClient(m_cm); + m_httpClient = httpClient; + + /* + * Ensure that the client follows redirects using a standard policy. + * + * Note: This is necessary for tests of the webapp structure since the + * container may respond with a redirect (302) to the location of the + * webapp when the client requests the root URL. + */ + httpClient.setRedirectStrategy(new DefaultRedirectStrategy()); + + executorService = Executors.newCachedThreadPool(DaemonThreadFactory + .defaultThreadFactory()); + + m_repo = new RemoteRepositoryManager(m_serviceURL, m_httpClient, + executorService); + + } + + @Override + protected void tearDown() throws Exception { + + m_rootURL = null; + m_serviceURL = null; + + if (m_cm != null) { + m_cm.shutdown(); + m_cm = null; + } + + m_httpClient = null; + m_repo = null; + + if (executorService != null) { + executorService.shutdownNow(); + executorService = null; + } + + super.tearDown(); + + } + + static class HealthCheckTestSuite extends TestSuite { + + /** + * The URL of the bigdata web application. + */ + @SuppressWarnings("unused") + private final String requestURI; + + /** + * + * @param name + * @param requestURI + * The URL of the bigdata web application. + */ + private HealthCheckTestSuite(final String name, final String requestURI) { + + super(name); + + this.requestURI = requestURI; + + // FIXME Hacked through static field. + TestNSSHealthCheck.requestURI = requestURI; + + } + + } + + static HealthCheckTestSuite createTestSuite(final String name, + final String requestURI) { + + final HealthCheckTestSuite suite = new HealthCheckTestSuite(name, + requestURI); + + suite.addTestSuite(TestNSSHealthCheck.class); + + return suite; + + } + + /** + * bare URL of the server + * + * <pre> + * http://localhost:8080 + * </pre> + * + * The response is should be <code>index.html</code> since we want the + * bigdata webapp to respond for the top-level context. + * + * <p> + * Note: You must ensure that the client follows redirects using a standard + * policy. 
This is necessary for tests of the webapp structure since the + * container may respond with a redirect (302) to the location of the webapp + * when the client requests the root URL. + */ + public void test_webapp_structure_rootURL() throws Exception { + + final String content = doGET(m_rootURL); + + assertTrue(content.contains(JUNIT_TEST_MARKER_INDEX_HTML)); + + } + + /** + * URL with correct context path + * + * <pre> + * http://localhost:8080/bigdata + * </pre> + * + * The response is should be <code>index.html</code>, which is specified + * through the welcome files list. + */ + public void test_webapp_structure_contextPath() throws Exception { + + final String content = doGET(m_serviceURL); + + assertTrue(content.contains(JUNIT_TEST_MARKER_INDEX_HTML)); + } + + /** + * URL with context path and index.html reference + * + * <pre> + * http://localhost:8080/bigdata/index.html + * </pre> + * + * This URL does NOT get mapped to anything (404). + */ + public void test_webapp_structure_contextPath_indexHtml() throws Exception { + + try { + + doGET(m_serviceURL + "/index.html"); + + } catch (HttpException ex) { + + assertEquals(404, ex.getStatusCode()); + + } + + } + + /** + * The <code>favicon.ico</code> file. + * + * @see <a href="http://www.w3.org/2005/10/howto-favicon"> How to add a + * favicon </a> + */ + public void test_webapp_structure_favicon() throws Exception { + + doGET(m_serviceURL + "/html/favicon.ico"); + + } + + /** + * The <code>/status</code> servlet responds. + */ + public void test_webapp_structure_status() throws Exception { + + doGET(m_serviceURL + "/status"); + + } + + /** + * The <code>/counters</code> servlet responds. + */ + public void test_webapp_structure_counters() throws Exception { + + doGET(m_serviceURL + "/counters"); + + } + +// /** +// * The <code>/namespace/</code> servlet responds (multi-tenancy API). +// */ +// public void test_webapp_structure_namespace() throws Exception { +// +// doGET(m_serviceURL + "/namespace/"); +// +// } + + /** + * The fully qualified URL for <code>index.html</code> + * + * <pre> + * http://localhost:8080/bigdata/html/index.html + * </pre> + * + * The response is should be <code>index.html</code>, which is specified + * through the welcome files list. + */ + public void test_webapp_structure_contextPath_html_indexHtml() throws Exception { + + doGET(m_serviceURL + "/html/index.html"); + } + + private String doGET(final String url) throws Exception { + + HttpResponse response = null; + HttpEntity entity = null; + + try { + + final ConnectOptions opts = new ConnectOptions(url); + opts.method = "GET"; + + response = doConnect(opts); + + checkResponseCode(url, response); + + entity = response.getEntity(); + + final String content = EntityUtils.toString(entity); + + return content; + + } finally { + + try { + EntityUtils.consume(entity); + } catch (IOException ex) { + log.warn(ex, ex); + } + + } + + } + + /** + * Connect to a SPARQL end point (GET or POST query only). + * + * @param opts + * The connection options. + * + * @return The connection. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/619"> + * RemoteRepository class should use application/x-www-form-urlencoded + * for large POST requests </a> + */ + private HttpResponse doConnect(final ConnectOptions opts) throws Exception { + + /* + * Generate the fully formed and encoded URL. 
+ */ + + final StringBuilder urlString = new StringBuilder(opts.serviceURL); + + ConnectOptions.addQueryParams(urlString, opts.requestParams); + + final boolean isLongRequestURL = urlString.length() > 1024; + + if (isLongRequestURL && opts.method.equals("POST") + && opts.entity == null) { + + /* + * URL is too long. Reset the URL to just the service endpoint and + * use application/x-www-form-urlencoded entity instead. Only in + * cases where there is not already a request entity (SPARQL query + * and SPARQL update). + */ + + urlString.setLength(0); + urlString.append(opts.serviceURL); + + opts.entity = ConnectOptions.getFormEntity(opts.requestParams); + + } else if (isLongRequestURL && opts.method.equals("GET") + && opts.entity == null) { + + /* + * Convert automatically to a POST if the request URL is too long. + * + * Note: [opts.entity == null] should always be true for a GET so + * this bit is a paranoia check. + */ + + opts.method = "POST"; + + urlString.setLength(0); + urlString.append(opts.serviceURL); + + opts.entity = ConnectOptions.getFormEntity(opts.requestParams); + + } + + if (log.isDebugEnabled()) { + log.debug("*** Request ***"); + log.debug(opts.serviceURL); + log.debug(opts.method); + log.debug("query=" + opts.getRequestParam("query")); + log.debug(urlString.toString()); + } + + HttpUriRequest request = null; + try { + + request = RemoteRepository.newRequest(urlString.toString(), opts.method); + + if (opts.requestHeaders != null) { + + for (Map.Entry<String, String> e : opts.requestHeaders + .entrySet()) { + + request.addHeader(e.getKey(), e.getValue()); + + if (log.isDebugEnabled()) + log.debug(e.getKey() + ": " + e.getValue()); + + } + + } + +// // conn = doConnect(urlString.toString(), opts.method); +// final URL url = new URL(urlString.toString()); +// conn = (HttpURLConnection) url.openConnection(); +// conn.setRequestMethod(opts.method); +// conn.setDoOutput(true); +// conn.setDoInput(true); +// conn.setUseCaches(false); +// conn.setReadTimeout(opts.timeout); +// conn.setRequestProperty("Accept", opts.acceptHeader); +// if (log.isDebugEnabled()) +// log.debug("Accept: " + opts.acceptHeader); + + if (opts.entity != null) { + +// if (opts.data == null) +// throw new AssertionError(); + +// final String contentLength = Integer.toString(opts.data.length); + +// conn.setRequestProperty("Content-Type", opts.contentType); +// conn.setRequestProperty("Content-Length", contentLength); + +// if (log.isDebugEnabled()) { +// log.debug("Content-Type: " + opts.contentType); +// log.debug("Content-Length: " + contentLength); +// } + +// final ByteArrayEntity entity = new ByteArrayEntity(opts.data); +// entity.setContentType(opts.contentType); + + ((HttpEntityEnclosingRequestBase) request).setEntity(opts.entity); + +// final OutputStream os = conn.getOutputStream(); +// try { +// os.write(opts.data); +// os.flush(); +// } finally { +// os.close(); +// } + + } + + final HttpResponse response = m_httpClient.execute(request); + + return response; + +// // connect. +// conn.connect(); +// +// return conn; + + } catch (Throwable t) { + /* + * If something goes wrong, then close the http connection. + * Otherwise, the connection will be closed by the caller. + */ + try { + + if (request != null) + request.abort(); + +// // clean up the connection resources +// if (conn != null) +// conn.disconnect(); + + } catch (Throwable t2) { + // ignored. 
+ } + throw new RuntimeException(opts.serviceURL + " : " + t, t); + } + + } + + /** + * Throw an exception if the status code does not indicate success. + * + * @param response + * The response. + * + * @return The response. + * + * @throws IOException + */ + private static HttpResponse checkResponseCode(final String url, + final HttpResponse response) throws IOException { + + final int rc = response.getStatusLine().getStatusCode(); + + if (rc < 200 || rc >= 300) { + throw new HttpException(rc, "StatusCode=" + rc + ", StatusLine=" + + response.getStatusLine() + ", headers=" + + Arrays.toString(response.getAllHeaders()) + + ", ResponseBody=" + + EntityUtils.toString(response.getEntity())); + + } + + if (log.isDebugEnabled()) { + /* + * write out the status list, headers, etc. + */ + log.debug("*** Response ***"); + log.debug("Status Line: " + response.getStatusLine()); + } + + return response; + + } + + /** + * Connect to the NSS end point and run a test suite designed to verify the + * health of that instance. + * + * @param args + * URL + * + * @throws MalformedURLException + * + * TODO Support HA health checks as well. + */ + public static void main(final String[] args) throws MalformedURLException { + + if (args.length < 1) { + System.err.println("usage: <cmd> Request-URI"); + System.exit(1); + } + + final String requestURI = args[0]; + + // Setup test result. + final TestResult result = new TestResult(); + + // Setup listener, which will write the result on System.out + result.addListener(new ResultPrinter(System.out)); + + result.addListener(new TestListener() { + + @Override + public void startTest(Test arg0) { + log.info(arg0); + } + + @Override + public void endTest(Test arg0) { + log.info(arg0); + } + + @Override + public void addFailure(Test arg0, AssertionFailedError arg1) { + log.error(arg0, arg1); + } + + @Override + public void addError(Test arg0, Throwable arg1) { + log.error(arg0, arg1); + } + }); + + try { + + // Setup test suite + final Test test = createTestSuite(null/* name */, requestURI); + + // Run the test suite. + test.run(result); + + } finally { + + } + + final String msg = "nerrors=" + result.errorCount() + ", nfailures=" + + result.failureCount() + ", nrun=" + result.runCount(); + + System.out.println(msg); + + if (result.errorCount() > 0 || result.failureCount() > 0) { + + // At least one test failed. + System.exit(1); + + } + + // All green. + System.exit(0); + + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-17 16:04:33 UTC (rev 8350) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-17 16:08:21 UTC (rev 8351) @@ -149,7 +149,7 @@ <Set name="contextPath">/bigdata</Set> <Set name="descriptor">WEB-INF/web.xml</Set> <Set name="parentLoaderPriority">true</Set> - <Set name="extractWAR">true</Set> + <Set name="extractWAR">false</Set> </New> </Arg> </Call> Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-17 16:04:33 UTC (rev 8350) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-17 16:08:21 UTC (rev 8351) @@ -1955,6 +1955,9 @@ <property name="app.home" location="${bigdata.dir}" /> + <!-- port for test NSS deployments. 
--> + <property name="test.NSS.port" value="24444" /> + <property name="test.codebase.port" value="23333" /> <property name="test.codebase.dir" value="${dist.lib.dl}" /> <property name="test.codebase" value="http://${this.hostname}:${test.codebase.port}/jsk-dl.jar" /> @@ -2225,6 +2228,9 @@ <isset property="testName" /> </condition> + <!-- test various deployment models. --> + <antcall target="test-deployments" /> + <!-- Note: timeout is milliseconds per forked JVM if specified. --> <!-- We have some long running unit tests so the timeout needs --> <!-- to be more than a minute if you do specify this property. --> @@ -2433,6 +2439,33 @@ </delete> </target> + <!-- --> + <!-- FIXME Ant targets for testing a variety of deployment models. --> + <!-- --> + <target name="test-deployments" description="Validate deployment models."> + <!-- antcall target="test-nss-default" / --> + </target> + + <!-- TODO Test with -Djetty.xml override. --> + <!-- TODO Test with -Djetty.resourceBase override. --> + <!-- TODO Test with CWD == ${dist.var.jetty} so it will locate the web app in the file system. --> + <target name="test-nss-default"> + <java classname="com.bigdata.rdf.sail.webapp.NanoSparqlServer" + failonerror="true" fork="true" logerror="true"> + <classpath path="${run.class.path}"/> + <jvmarg value="-server"/> + <jvmarg value="-Xmx1G"/> + <jvmarg value="-Dlog4j.configuration=${dist.var.config.logging}log4j.properties"/> + <arg value="${test.NSS.port}"/> + <arg value="kb"/> + <arg value="${dist.var.jetty}/WEB-INF/RWStore.properties"/> + </java> + <!-- TODO Get the PID --> + <!-- TODO Run junit test suite for validation (and integrate into total CI junit summary). --> + <!-- TODO Sure kill using the PID (or use the jetty command to stop the process). --> + <!-- TODO We also need a reliable way to handle this for a remote deploy. --> + </target> + <!-- --> <!-- SESAME SERVER TARGETS --> <!-- --> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
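The commit above (r8351) describes how the NanoSparqlServer now resolves jetty.xml and the webapp resourceBase: an explicit system property wins, then the local file system is searched, then the classpath, and finally the default baked into jetty.xml is used. The launcher below is only an illustrative sketch, not part of the commit: the class name StartNssExample is invented here, the paths are placeholders, and the positional argument order (port, default namespace, RWStore.properties) is taken from the test-nss-default ant target introduced in this revision.

import com.bigdata.rdf.sail.webapp.NanoSparqlServer;

/**
 * Illustrative launcher (sketch only): starts an NSS instance with explicit
 * jetty overrides so the webapp location is resolved deterministically.
 */
public class StartNssExample {

    public static void main(final String[] args) throws Exception {

        // Optional overrides. If these are left unset, the server searches the
        // local file system, then the classpath, and finally falls back to the
        // default declared in jetty.xml (see the javadoc in the commit above).
        System.setProperty("jetty.xml", "bigdata-war/src/jetty.xml");
        System.setProperty("jetty.resourceBase", "bigdata-war/src");

        // Same argument order as the test-nss-default ant target:
        // <port> <default namespace> <RWStore.properties file>
        NanoSparqlServer.main(new String[] { //
                "24444", //
                "kb", //
                "bigdata-war/src/WEB-INF/RWStore.properties" //
        });
    }
}

Once the server is up, the TestNSSHealthCheck utility added in the same revision can be run from a separate JVM (with the appropriate classpath) against the server's root URL, for example "java com.bigdata.rdf.sail.webapp.health.TestNSSHealthCheck http://localhost:24444"; its main(String[]) takes the Request-URI as its sole argument and exits with a non-zero status if any health check fails.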
From: <tho...@us...> - 2014-05-17 16:04:37
Revision: 8350 http://sourceforge.net/p/bigdata/code/8350 Author: thompsonbry Date: 2014-05-17 16:04:33 +0000 (Sat, 17 May 2014) Log Message: ----------- Removed import. Suppressed warning. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-16 19:05:43 UTC (rev 8349) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-17 16:04:33 UTC (rev 8350) @@ -104,7 +104,6 @@ import com.bigdata.rdf.sail.webapp.ConfigParams; import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet; import com.bigdata.rdf.sail.webapp.NanoSparqlServer; -import com.bigdata.rdf.sail.webapp.NanoSparqlServer.SystemProperties; import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy; import com.bigdata.rwstore.RWStore; import com.bigdata.service.AbstractHATransactionService; @@ -752,6 +751,7 @@ /* * Zookeeper quorum. */ + @SuppressWarnings({ "unchecked", "rawtypes" }) final Quorum<HAGlue, QuorumService<HAGlue>> quorum = (Quorum) new ZKQuorumImpl<HAGlue, HAQuorumService<HAGlue, HAJournal>>( replicationFactor); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-05-16 19:05:46
Revision: 8349 http://sourceforge.net/p/bigdata/code/8349 Author: dmekonnen Date: 2014-05-16 19:05:43 +0000 (Fri, 16 May 2014) Log Message: ----------- Updates for cookbook consolidation. Modified Paths: -------------- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/README.md branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/attributes/default.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/mapgraph.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/nss.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/tomcat.rb Added Paths: ----------- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/high_availability.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/ssd.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/default/ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/default/bigdataHA.erb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/jetty.xml.erb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/log4jHA.properties.erb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/zoo.cfg.erb Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/README.md =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/README.md 2014-05-16 18:36:34 UTC (rev 8348) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/README.md 2014-05-16 19:05:43 UTC (rev 8349) @@ -178,7 +178,7 @@ License and Authors ------------------- -Author:: Daniel Mekonnen [daniel<o-spam-at>systap.com] +Author:: Daniel Mekonnen [daniel<no-spam-at>systap.com] GNU GPLv2 - This pakcage may be resiributed under the same terms and conditions as the Bigdata project that it is a part of. Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/attributes/default.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/attributes/default.rb 2014-05-16 18:36:34 UTC (rev 8348) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/attributes/default.rb 2014-05-16 19:05:43 UTC (rev 8349) @@ -1,16 +1,23 @@ - +# +# Where bigdata resource files will be installed: +# default['bigdata'][:home] = "/var/lib/bigdata" -# Who runs bigdata? +# +# Who runs bigdata? This is applicable to NSS and HA installs only: +# default['bigdata'][:user] = "bigdata" default['bigdata'][:group] = "bigdata" -default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties" +# +# When "build_from_svn" is "true", code retrieved from subversion will be downloaded to the "source_dir" directory: +# +default['bigdata'][:source_dir] = "/home/ubuntu/bigdata-code" -default['bigdata'][:source] = "bigdata-code" -case node['bigdata'][:install_type] +case node['bigdata'][:install_flavor] when "nss" + # The URL to the bigdata-nss bundle. 
The following is the same bundle used by the Bigdata Brew installer: default['bigdata'][:url] = "http://bigdata.com/deploy/bigdata-1.3.0.tgz" # Where the jetty resourceBase is defined: @@ -22,16 +29,24 @@ # Where the bigdata-ha.jnl file will live: default['bigdata'][:data_dir] = node['bigdata'][:home] + "/var/data" + # The subversion branch to use when building from source: if node['bigdata'][:build_from_svn] default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1" end when "tomcat" + # The Tomcat version to install. The Bigdata Chef cookbook has only been tested with Version 7: default['tomcat'][:base_version] = 7 + + # JRE options options to set for Tomcat, the following is strongly recommended: default['tomcat'][:java_options] = "-Djava.awt.headless=true -server -Xmx4G -XX:+UseG1GC" + # A SourceForge URL to use for downloading the bigdata.war file: default['bigdata'][:url] = "http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/1.3.0/bigdata.war" + # Where the bigdata contents reside under Tomcat: default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata" + + # Where the log4j.properites file can be found: default['bigdata'][:log4j_properties] = default['bigdata'][:web_home] + "/WEB-INF/classes/log4j.properties" # Where the bigdata-ha.jnl file will live: @@ -40,20 +55,59 @@ # Where the log files will live: default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log" + # The subversion branch to use when building from source: if node['bigdata'][:build_from_svn] default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" end +when "ha" + # + # Presently Bigdata HA can only be deployed from an SVN build so we set the flag to true: + # + default['bigdata'][:build_from_svn] = true + + # The subversion branch to use when building from source: + if node['bigdata'][:build_from_svn] + # default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" + default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1" + end + + # Where the bigdata-ha.jnl file will live: + default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data" + + # Where the log files will live: + default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log" + + # Where the jetty resourceBase is defined: + default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty" + + # Name of the federation of services (controls the Apache River GROUPS). + default['bigdata'][:fedname] = 'my-cluster-1' + + # Name of the replication cluster to which this HAJournalServer will belong. + default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1' + + # Where to find the Apache River service registrars (can also use multicast). + default['bigdata'][:river_locator1] = 'bigdataA' + default['bigdata'][:river_locator2] = 'bigdataB' + default['bigdata'][:river_locator3] = 'bigdataC' + + # Where to find the Apache Zookeeper ensemble. + default['bigdata'][:zk_server1] = 'bigdataA' + default['bigdata'][:zk_server2] = 'bigdataB' + default['bigdata'][:zk_server3] = 'bigdataC' end -############################################################## +################################################################################### # -# Set the RWStore.properties attributes that apply for all -# installation scenarios. +# Set the RWStore.properties attributes that apply for all installation scenarios. 
# -############################################################## +################################################################################### +# Where the RWStore.properties file can be found: +default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties" + default['bigdata']['journal.AbstractJournal.bufferMode'] = "DiskRW" # Setup for the RWStore recycler rather than session protection. @@ -66,9 +120,7 @@ default['bigdata']['journal.AbstractJournal.initialExtent'] = "209715200" default['bigdata']['journal.AbstractJournal.maximumExtent'] = "209715200" -## -## Setup for QUADS mode without the full text index. -## +# Setup for QUADS mode without the full text index. default['bigdata']['rdf.sail.truthMaintenance'] = "false" default['bigdata']['rdf.store.AbstractTripleStore.quads'] = "false" default['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers'] = "false" @@ -81,7 +133,26 @@ # Bump up the branching factor for the statement indices on the default kb. default['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor'] = "1024" default['bigdata']['rdf.sail.bufferCapacity'] = "100000" -# default['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] = "" -default['mapgraph'][:source] = "mapgraph-code" +# +# Bigdata supports over a hundred properties and only the most commonly configured +# are set here as Chef attributes. Any number of additional properties may be +# configured by Chef. To do so, add the desired property in this (attributes/default.rb) +# file as well as in the templates/default/RWStore.properties.erb file. The +# "vocabularyClass" property (below) for inline URIs is used as example additional +# entry: +# +# default['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] = "com.my.VocabularyClass" + + +################################################################# +# +# The following attributes are defaults for the MapGraph recipe. 
+# +################################################################# + +# The subversion branch to use when building from source: default['mapgraph'][:svn_branch] = "https://svn.code.sf.net/p/mpgraph/code/trunk" + +# MapGraph code retrieved from subversion will be downloaded to the "source_dir" directory: +default['mapgraph'][:source_dir] = "/home/ubuntu/mapgraph-code" Added: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/high_availability.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/high_availability.rb (rev 0) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/high_availability.rb 2014-05-16 19:05:43 UTC (rev 8349) @@ -0,0 +1,174 @@ +# +# Cookbook Name:: bigdata +# Recipe:: high_availability +# +# Copyright 2014, Systap +# + +# +# Only do the following for Bigdata HA install +# +if node['bigdata'][:install_flavor] == "ha" + + include_recipe "java" + include_recipe "ant" + include_recipe "sysstat" + include_recipe "subversion::client" + include_recipe "hadoop::zookeeper_server" + + # + # Create the bigdata systm group: + # + group node['bigdata'][:group] do + action :create + append true + end + + # + # Create the bigdata systm user: + # + user node['bigdata'][:user] do + gid node['bigdata'][:group] + supports :manage_home => true + shell "/bin/false" + home node['bigdata'][:home] + system true + action :create + end + + # + # Make sure the Bigdata home directory is owned by the bigdata user and group: + # + execute "change the ownership of the bigdata home directory to bigdata, which strangely is not" do + user "root" + group "root" + cwd node['bigdata'][:home] + command "chown -R #{node['bigdata'][:user]}:#{node['bigdata'][:group]} ." + end + + # + # Retrieve the Bigdata source from the specified subversion branch: + # + execute "checkout bigdata from svn repo" do + user 'ubuntu' + group 'ubuntu' + cwd "/home/ubuntu" + command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" + end + + # + # Build the bigdata release package: + # + execute "ant deploy-artifact" do + user 'ubuntu' + group 'ubuntu' + cwd node['bigdata'][:source_dir] + command "ant deploy-artifact" + end + + # + # Extract the just built release package, thus installing it in the Bigdata home directory: + # + execute "deflate REL tar" do + user node['bigdata'][:user] + group node['bigdata'][:group] + cwd "#{node['bigdata'][:home]}/.." 
+ command "tar xvf #{node['bigdata'][:source_dir]}/REL.bigdata-1.3.0-*.tgz" + end + + # + # Install hte bigdataHA service file: + # + execute "copy over the /etc/init.d/bigdataHA file" do + user 'root' + group 'root' + cwd "#{node['bigdata'][:home]}/etc/init.d" + command "cp bigdataHA /etc/init.d/bigdataHA; chmod 00755 /etc/init.d/bigdataHA" + end + + # + # Create the log directory for bigdata: + # + directory node['bigdata'][:log_dir] do + owner node['bigdata'][:user] + group node['bigdata'][:group] + mode 00755 + action :create + end + + # + # Install the log4jHA.properties file: + # + template "#{node['bigdata'][:home]}/var/config/logging/log4jHA.properties" do + source "log4jHA.properties.erb" + owner node['bigdata'][:user] + group node['bigdata'][:group] + mode 00644 + end + + # + # Install the jetty.xml file: + # + template "#{node['bigdata'][:jetty_dir]}/jetty.xml" do + source "jetty.xml.erb" + owner node['bigdata'][:user] + group node['bigdata'][:group] + mode 00644 + end + + # + # Set the absolute path to the RWStore.properties file + # + execute "set absolute path to RWStore.properties" do + cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF" + command "sed -i 's|<param-value>WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:jetty_dir]}/WEB-INF/RWStore.properties|' web.xml" + end + + # + # Install the RWStore.properties file: + # + template node['bigdata'][:properties] do + source "RWStore.properties.erb" + owner node['bigdata'][:user] + group node['bigdata'][:group] + mode 00644 + end + + # + # Copy the /etc/default/bigdataHA template: + # + template "/etc/default/bigdataHA" do + source "default/bigdataHA.erb" + user 'root' + group 'root' + mode 00644 + end + + # + # Setup the bigdataHA script as a service: + # + service "bigdataHA" do + supports :restart => true, :status => true + action [ :enable, :start ] + end + + # + # Install the zoo.cfg file: + # + template "/etc/zookeeper/conf/zoo.cfg" do + source "zoo.cfg.erb" + owner 'root' + group 'root' + mode 00644 + end + + # + # The hadoop cookbook overlooks the log4j.properties file presently, but a future version may get this right: + # + execute "copy the distribution log4j.properties file" do + user 'root' + group 'root' + cwd "/etc/zookeeper/conf.chef" + command "cp ../conf.dist/log4j.properties ." 
+ end +end Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/mapgraph.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/mapgraph.rb 2014-05-16 18:36:34 UTC (rev 8348) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/mapgraph.rb 2014-05-16 19:05:43 UTC (rev 8349) @@ -1,24 +1,42 @@ # -# Cookbook Name:: systap-bigdata -# Recipe:: default +# Cookbook Name:: bigdata +# Recipe:: mapgraph # -# Copyright 2013, Systap +# Copyright 2014, Systap # + # +# MapGraph Installer +# +include_recipe "java" + + +# +# Make sure the Bigdata home directory is owned by the bigdata user and group: +# execute "pull mapgraph from svn repo" do user 'ubuntu' group 'ubuntu' cwd "/home/ubuntu" - command "svn checkout #{default['mapgraph'][:svn_branch]} #{node['mapgraph'][:source]}" + command "svn checkout #{default['mapgraph'][:svn_branch]} #{node['mapgraph'][:source_dir]}" end + +# +# Build MapGgraph: +# execute "make mapgraph" do - cwd node['mapgraph'][:source] + cwd node['mapgraph'][:source_dir] command "make" end + + +# +# Run a basic test of MapGraph: +# execute "test mapgraph" do - cwd node['mapgraph'][:source] + cwd node['mapgraph'][:source_dir] command "./Algorithms/SSSP/SSSP -g smallRegressionGraphs/small.mtx" end Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/nss.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/nss.rb 2014-05-16 18:36:34 UTC (rev 8348) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/nss.rb 2014-05-16 19:05:43 UTC (rev 8349) @@ -1,14 +1,28 @@ -include_recipe "java" # -# Only do the following for tomcat install +# Cookbook Name:: bigdata +# Recipe:: nss # -if node['bigdata'][:install_type] == "nss" +# Copyright 2014, Systap +# +# +# Only do the following for Bigdata NSS install +# +if node['bigdata'][:install_flavor] == "nss" + + include_recipe "java" + + # + # Create the bigdata systm group: + # group node['bigdata'][:group] do action :create append true end + # + # Create the bigdata systm user: + # user node['bigdata'][:user] do gid node['bigdata'][:group] supports :manage_home => true @@ -21,25 +35,34 @@ if node['bigdata'][:build_from_svn] include_recipe "ant" - include_recipe "subversion" + include_recipe "subversion::client" + # + # Retrieve the Bigdata source from the specified subversion branch: + # execute "checkout bigdata from svn repo" do user 'ubuntu' group 'ubuntu' cwd "/home/ubuntu" - command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source]}" + command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" end + # + # Build the bigdata release package: + # execute "build the nss tar ball" do user 'ubuntu' group 'ubuntu' - cwd "/home/ubuntu/#{node['bigdata'][:source]}" + cwd node['bigdata'][:source_dir] command "ant package-nss-brew" end + # + # Extract the just built release package, thus installing it in the Bigdata home directory: + # execute "Extract and relocate the bigdata archive" do cwd "/var/lib" - command "tar xvf /home/ubuntu/#{node['bigdata'][:source]}/REL-NSS.bigdata-1.*.tgz" + command "tar xvf #{node['bigdata'][:source_dir]}/REL-NSS.bigdata-1.*.tgz" end else # @@ -51,6 +74,9 @@ source node['bigdata'][:url] end + # + # Extract the just retrieved release package, thus 
installing it in the Bigdata home directory: + # execute "Extract and relocate the bigdata archive" do cwd "/var/lib" command "tar xvf /tmp/bigdata.tgz" @@ -65,6 +91,9 @@ end end + # + # Make sure the Bigdata home directory is owned by the bigdata user and group: + # execute "change the ownership of the bigdata home directory to bigdata, which strangely is not" do user "root" group "root" @@ -72,38 +101,56 @@ command "chown -R #{node['bigdata'][:user]}:#{node['bigdata'][:group]} ." end + # + # Create a symbolic link of the bin/bigdataNSS script to /etc/init.d/bigdataNSS: + # link "/etc/init.d/bigdataNSS" do to "#{node['bigdata'][:home]}/bin/bigdataNSS" end # - # We shell out to make template substitutions + # Set the install type in the bin/bigdataNSS script: # execute "set the INSTALL_TYPE in bin/bigdata" do cwd "#{node['bigdata'][:home]}/bin" - command "sed -i 's|<%= INSTALL_TYPE %>|#{node['bigdata'][:install_type]}|' bigdataNSS" + command "sed -i 's|<%= INSTALL_TYPE %>|#{node['bigdata'][:install_flavor]}|' bigdataNSS" end + # + # Set the Bigdata home directory in the bin/bigdataNSS file: + # execute "set the BD_HOME in bin/bigdata" do cwd "#{node['bigdata'][:home]}/bin" command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' bigdataNSS" end + # + # Set the absolute path to the bigdata.jnl file in RWStore.properties + # execute "set the BD_HOME in RWStore.properties" do cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF" command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' RWStore.properties" end + # + # Set the Bigdata home directory in the log4j.properties file to set the path for the log files: + # execute "set the BD_HOME in log4j.properties" do cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF/classes" command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' log4j.properties" end + # + # Install the jetty.xml file: + # execute "set the JETTY_DIR in jetty.xml" do cwd "#{node['bigdata'][:jetty_dir]}/etc/" command "sed -i 's|<%= JETTY_DIR %>|#{node['bigdata'][:jetty_dir]}|' jetty.xml" end + # + # Setup the bigdataNSS script as a service: + # service "bigdataNSS" do # # Reenable this when the bin/bigdata script is updated to return a "1" for a successful status: Added: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/ssd.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/ssd.rb (rev 0) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/ssd.rb 2014-05-16 19:05:43 UTC (rev 8349) @@ -0,0 +1,39 @@ +# +# Cookbook Name:: bigdata +# Recipe:: ssd +# +# Copyright 2014, Systap +# + +# +# SSD Setup +# +include_recipe "lvm" + + +# +# Create the directory that will be the mount target: +# +directory node['bigdata'][:data_dir] do + owner "root" + group "root" + mode 00755 + action :create + recursive true +end + + +# +# Create and mount the logical volume: +# +lvm_volume_group 'vg' do + action :create + physical_volumes ['/dev/xvdb', '/dev/xvdc'] + + logical_volume 'lv_bigdata' do + size '100%VG' + filesystem 'ext4' + mount_point location: node['bigdata'][:data_dir], options: 'noatime,nodiratime' + # stripes 4 + end +end Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/tomcat.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/tomcat.rb 2014-05-16 18:36:34 UTC (rev 8348) +++ 
branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/tomcat.rb 2014-05-16 19:05:43 UTC (rev 8349) @@ -1,16 +1,18 @@ # # Cookbook Name:: bigdata -# Recipe:: default +# Recipe:: tomcat # -include_recipe "java" -include_recipe "tomcat" +# Copyright 2014, Systap +# -# include_attributes "bigdata::tomcat" - # -# Only do the following for tomcat install +# Only do the following for Bigdata Tomcat install # -if node['bigdata'][:install_type] == "tomcat" +if node['bigdata'][:install_flavor] == "tomcat" + + include_recipe "java" + include_recipe "tomcat" + # # The tomcat cookbook provides an /etc/default/tomcat7 file that contains multiple JAVA_OPTS lines but allows you to # modify only one of them during installation. As a consequence JAVA_OPTS conflicts may occur. We comment out the @@ -27,34 +29,40 @@ if node['bigdata'][:build_from_svn] include_recipe "ant" - include_recipe "subversion" + include_recipe "subversion::client" + # + # Retrieve the Bigdata source from the specified subversion branch: + # execute "checkout bigdata from svn repo" do user 'ubuntu' group 'ubuntu' cwd "/home/ubuntu" - command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source]}" + command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" end + # + # Build the bigdata.war file: + # execute "build the war file" do - user 'ubuntu' - group 'ubuntu' - cwd "/home/ubuntu/#{node['bigdata'][:source]}" - command "ant war" + user 'ubuntu' + group 'ubuntu' + cwd node['bigdata'][:source_dir] + command "ant war" end # # Install the WAR file: # remote_file "#{node['tomcat'][:webapp_dir]}/bigdata.war" do - source "file:///home/ubuntu/#{node['bigdata'][:source]}/ant-build/bigdata.war" + source "file:///#{node['bigdata'][:source_dir]}/ant-build/bigdata.war" owner node['tomcat'][:user] group node['tomcat'][:group] end else # - # Install the WAR file: + # Install the WAR file from the SourceForge URL: # remote_file "#{node['tomcat'][:webapp_dir]}/bigdata.war" do source node['bigdata'][:url] @@ -128,6 +136,9 @@ # suitable against a larger range of bigdata releases. # if node['bigdata'][:build_from_svn] + # + # Set the RWStore.properties path in the web.xml file: + # execute "set absolute path for RWStore.properties" do cwd "#{node['bigdata'][:web_home]}/WEB-INF" command "sed -i 's|<param-value>../webapps/bigdata/WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:home]}/RWStore.properties|' web.xml" Added: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/default/bigdataHA.erb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/default/bigdataHA.erb (rev 0) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/default/bigdataHA.erb 2014-05-16 19:05:43 UTC (rev 8349) @@ -0,0 +1,51 @@ +# Environment for bigdata HA services. +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Note: You MUST provide the location of the executable scripts and the +# pid file that is written by $binDir/startHAServices. These SHOULD be +# absolute path names. 
+ +BD_USER="<%= node['bigdata'][:user] %>" +BD_GROUP="<%= node['bigdata'][:group] %>" + +binDir=<%= node['bigdata'][:home] %>/bin +pidFile=<%= node['bigdata'][:home] %>/var/lock/pid + +## +# The following variables configure the startHAServices script, which +# passes them through to HAJournal.config. +## + +# Name of the bigdata gederation of services. Override for real install. +export FEDNAME=<%= node['bigdata'][:fedname] %> + +# This is different for each HA replication cluster in the same federation +# of services. If you have multiple such replication cluster, then just +# given each such cluster its own name. +export LOGICAL_SERVICE_ID=<%= node['bigdata'][:logical_service_id] %> + +# Local directory where the service will store its state. +export FED_DIR=<%= node['bigdata'][:home] %> +export DATA_DIR=<%= node['bigdata'][:data_dir] %> + +# Apache River - NO default for "LOCATORS". +export GROUPS="${FEDNAME}" +export LOCATORS="%JINI_LOCATORS%" + +# Apache ZooKeeper - NO default. +export ZK_SERVERS="<%= node['bigdata'][:zk_server1] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['bigdata'][:zk_server2] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['bigdata'][:zk_server3] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>" + + +# All of these have defaults. Override as necessary. +#export REPLICATION_FACTOR=3 +#export HA_PORT=9090 +#export JETTY_PORT=8080 +#export JETTY_XML=var/jetty/jetty.xml +#export JETTY_RESOURCE_BASE=var/jetty/html +#export COLLECT_QUEUE_STATISTICS= +#export COLLECT_PLATFORM_STATISTICS= +#export GANGLIA_REPORT= +#export GANGLIA_LISTENER= +#export SYSSTAT_DIR= Added: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/jetty.xml.erb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/jetty.xml.erb (rev 0) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/jetty.xml.erb 2014-05-16 19:05:43 UTC (rev 8349) @@ -0,0 +1,195 @@ +<?xml version="1.0"?> +<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure.dtd"> +<!-- See http://www.eclipse.org/jetty/documentation/current/ --> +<!-- See http://wiki.eclipse.org/Jetty/Reference/jetty.xml_syntax --> +<Configure id="Server" class="org.eclipse.jetty.server.Server"> + + <!-- =========================================================== --> + <!-- Configure the Server Thread Pool. --> + <!-- The server holds a common thread pool which is used by --> + <!-- default as the executor used by all connectors and servlet --> + <!-- dispatches. --> + <!-- --> + <!-- Configuring a fixed thread pool is vital to controlling the --> + <!-- maximal memory footprint of the server and is a key tuning --> + <!-- parameter for tuning. In an application that rarely blocks --> + <!-- then maximal threads may be close to the number of 5*CPUs. --> + <!-- In an application that frequently blocks, then maximal --> + <!-- threads should be set as high as possible given the memory --> + <!-- available. --> + <!-- --> + <!-- Consult the javadoc of o.e.j.util.thread.QueuedThreadPool --> + <!-- for all configuration that may be set here. 
--> + <!-- =========================================================== --> + <!-- uncomment to change type of threadpool --> + <Arg name="threadpool"><New id="threadpool" class="org.eclipse.jetty.util.thread.QueuedThreadPool"/></Arg> + <!-- --> + <Get name="ThreadPool"> + <Set name="minThreads" type="int"><SystemProperty name="jetty.threads.min" default="10"/></Set> + <Set name="maxThreads" type="int"><SystemProperty name="jetty.threads.max" default="64"/></Set> + <Set name="idleTimeout" type="int"><SystemProperty name="jetty.threads.timeout" default="60000"/></Set> + <Set name="detailedDump">false</Set> + </Get> + + <!-- =========================================================== --> + <!-- Get the platform mbean server --> + <!-- =========================================================== --> + <Call id="MBeanServer" class="java.lang.management.ManagementFactory" + name="getPlatformMBeanServer" /> + + <!-- =========================================================== --> + <!-- Initialize the Jetty MBean container --> + <!-- =========================================================== --> + <!-- Note: This breaks CI if it is enabled + <Call name="addBean"> + <Arg> + <New id="MBeanContainer" class="org.eclipse.jetty.jmx.MBeanContainer"> + <Arg> + <Ref refid="MBeanServer" /> + </Arg> + </New> + </Arg> + </Call>--> + + <!-- Add the static log to the MBean server. + <Call name="addBean"> + <Arg> + <New class="org.eclipse.jetty.util.log.Log" /> + </Arg> + </Call>--> + + <!-- For remote MBean access (optional) + <New id="ConnectorServer" class="org.eclipse.jetty.jmx.ConnectorServer"> + <Arg> + <New class="javax.management.remote.JMXServiceURL"> + <Arg type="java.lang.String">rmi</Arg> + <Arg type="java.lang.String" /> + <Arg type="java.lang.Integer"><SystemProperty name="jetty.jmxrmiport" default="1090"/></Arg> + <Arg type="java.lang.String">/jndi/rmi://<SystemProperty name="jetty.jmxrmihost" default="localhost"/>:<SystemProperty name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg> + </New> + </Arg> + <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg> + <Call name="start" /> + </New>--> + + <!-- =========================================================== --> + <!-- Http Configuration. --> + <!-- This is a common configuration instance used by all --> + <!-- connectors that can carry HTTP semantics (HTTP, HTTPS, SPDY)--> + <!-- It configures the non wire protocol aspects of the HTTP --> + <!-- semantic. --> + <!-- --> + <!-- Consult the javadoc of o.e.j.server.HttpConfiguration --> + <!-- for all configuration that may be set here. 
--> + <!-- =========================================================== --> + <New id="httpConfig" class="org.eclipse.jetty.server.HttpConfiguration"> + <Set name="secureScheme">https</Set> + <Set name="securePort"><Property name="jetty.secure.port" default="8443" /></Set> + <Set name="outputBufferSize"><Property name="jetty.output.buffer.size" default="32768" /></Set> + <Set name="requestHeaderSize"><Property name="jetty.request.header.size" default="8192" /></Set> + <Set name="responseHeaderSize"><Property name="jetty.response.header.size" default="8192" /></Set> + <Set name="sendServerVersion"><Property name="jetty.send.server.version" default="true" /></Set> + <Set name="sendDateHeader"><Property name="jetty.send.date.header" default="false" /></Set> + <Set name="headerCacheSize">512</Set> + <!-- Uncomment to enable handling of X-Forwarded- style headers + <Call name="addCustomizer"> + <Arg><New class="org.eclipse.jetty.server.ForwardedRequestCustomizer"/></Arg> + </Call> + --> + </New> + + <!-- Configure the HTTP endpoint. --> + <Call name="addConnector"> + <Arg> + <New class="org.eclipse.jetty.server.ServerConnector"> + <Arg name="server"><Ref refid="Server" /></Arg> + <Arg name="factories"> + <Array type="org.eclipse.jetty.server.ConnectionFactory"> + <Item> + <New class="org.eclipse.jetty.server.HttpConnectionFactory"> + <Arg name="config"><Ref refid="httpConfig" /></Arg> + </New> + </Item> + </Array> + </Arg> + <Set name="host"><Property name="jetty.host" /></Set> + <Set name="port"><Property name="jetty.port" default="8080" /></Set> + <Set name="idleTimeout"><Property name="http.timeout" default="30000"/></Set> + </New> + </Arg> + </Call> + + <!-- =========================================================== --> + <!-- Set handler Collection Structure --> + <!-- =========================================================== --> + <!-- Recommended approach: does not work for HA CI test suite. + <Set name="handler"> + <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection"> + <Set name="handlers"> + <Array type="org.eclipse.jetty.server.Handler"> + <Item> + <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> + <Set name="war"> + <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" /> + </Set> + <Set name="contextPath">/bigdata</Set> + <Set name="descriptor">WEB-INF/web.xml</Set> + <Set name="parentLoaderPriority">true</Set> + <Set name="extractWAR">false</Set> + </New> + </Item> + </Array> + </Set> + </New> + </Set> --> + <Set name="handler"> + <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection"> + <Set name="handlers"> + <Array type="org.eclipse.jetty.server.Handler"> + <Item> + <!-- This is the bigdata web application. --> + <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> + <Set name="resourceBase"> + <!-- The location of the top-level of the bigdata webapp. --> + <Property name="jetty.resourceBase" default="<%= node['bigdata'][:jetty_dir] %>" /> + </Set> + <Set name="contextPath">/bigdata</Set> + <Set name="descriptor">WEB-INF/web.xml</Set> + <Set name="descriptor"><%= node['bigdata'][:jetty_dir] %>/WEB-INF/web.xml</Set> + <Set name="parentLoaderPriority">true</Set> + <Set name="extractWAR">false</Set> + </New> + </Item> + <Item> + <!-- This appears to be necessary in addition to the above. --> + <!-- Without this, it will not resolve http://localhost:8080/ --> + <!-- and can fail to deliver some of the static content. 
--> + <New id="ResourceHandler" class="org.eclipse.jetty.server.handler.ResourceHandler"> + <Set name="resourceBase"> + <!-- The location of the top-level of the bigdata webapp. --> + <Property name="jetty.resourceBase" default="<%= node['bigdata'][:jetty_dir] %>" /> + </Set> + <Set name="welcomeFiles"> + <Array type="java.lang.String"> + <Item>html/index.html</Item> + </Array> + </Set> + </New> + </Item> + <!-- <Item> + <New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler"></New> + </Item> --> + </Array> + </Set> + </New> + </Set> + + <!-- =========================================================== --> + <!-- extra server options --> + <!-- =========================================================== --> + <Set name="stopAtShutdown">true</Set> + <Set name="stopTimeout">5000</Set> + <Set name="dumpAfterStart"><Property name="jetty.dump.start" default="false"/></Set> + <Set name="dumpBeforeStop"><Property name="jetty.dump.stop" default="false"/></Set> + +</Configure> Added: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/log4jHA.properties.erb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/log4jHA.properties.erb (rev 0) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/log4jHA.properties.erb 2014-05-16 19:05:43 UTC (rev 8349) @@ -0,0 +1,85 @@ +## +# This is the default log4j configuration for distribution and CI tests. +## + +# Note: logging at INFO or DEBUG will significantly impact throughput! +log4j.rootCategory=WARN, dest2 + +log4j.logger.com.bigdata=WARN +log4j.logger.com.bigdata.btree=WARN +log4j.logger.com.bigdata.counters.History=ERROR +log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR +log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO +log4j.logger.com.bigdata.journal.CompactTask=INFO +log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR +log4j.logger.com.bigdata.rdf.load=INFO +log4j.logger.com.bigdata.rdf.store.DataLoader=INFO +log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO + +log4j.logger.com.bigdata.rdf.sail.webapp.NanoSparqlServer=INFO + +# This will only work if you have the slf4j bridge setup. +#log4j.org.eclipse.jetty.util.log.Log=INFO + +# This can provide valuable information about open connections. +log4j.logger.com.bigdata.txLog=INFO + +# HA related loggers (debugging only) +log4j.logger.com.bigdata.ha=INFO +#log4j.logger.com.bigdata.haLog=INFO +##log4j.logger.com.bigdata.rwstore=ALL +#log4j.logger.com.bigdata.journal=INFO +##log4j.logger.com.bigdata.journal.AbstractBufferStrategy=ALL +log4j.logger.com.bigdata.journal.jini.ha=INFO +##log4j.logger.com.bigdata.service.jini.lookup=ALL +log4j.logger.com.bigdata.quorum=INFO +log4j.logger.com.bigdata.quorum.zk=INFO +#log4j.logger.com.bigdata.quorum.quorumState=ALL,destPlain +##log4j.logger.com.bigdata.io.writecache=ALL + +# dest2 includes the thread name and elapsed milliseconds. +# Note: %r is elapsed milliseconds. +# Note: %t is the thread name. 
+# See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html +#log4j.appender.dest2=org.apache.log4j.ConsoleAppender +log4j.appender.dest2=org.apache.log4j.RollingFileAppender +log4j.appender.dest2.File=<%= node['bigdata'][:log_dir] %>/HAJournalServer.log +log4j.appender.dest2.MaxFileSize=500MB +log4j.appender.dest2.MaxBackupIndex=20 +log4j.appender.dest2.layout=org.apache.log4j.PatternLayout +log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %d{ISO8601} %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n + +## destPlain +#log4j.appender.destPlain=org.apache.log4j.ConsoleAppender +#log4j.appender.destPlain.layout=org.apache.log4j.PatternLayout +#log4j.appender.destPlain.layout.ConversionPattern= + +## +# Summary query evaluation log (tab delimited file). +#log4j.logger.com.bigdata.bop.engine.QueryLog=INFO,queryLog +log4j.additivity.com.bigdata.bop.engine.QueryLog=false +log4j.appender.queryLog=org.apache.log4j.FileAppender +log4j.appender.queryLog.Threshold=ALL +log4j.appender.queryLog.File=<%= node['bigdata'][:log_dir] %>/queryLog.csv +log4j.appender.queryLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. +log4j.appender.queryLog.BufferedIO=false +log4j.appender.queryLog.layout=org.apache.log4j.PatternLayout +log4j.appender.queryLog.layout.ConversionPattern=%m + +## +# BOp run state trace (tab delimited file). Uncomment the next line to enable. +#log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog +log4j.additivity.com.bigdata.bop.engine.RunState$TableLog=false +log4j.appender.queryRunStateLog=org.apache.log4j.FileAppender +log4j.appender.queryRunStateLog.Threshold=ALL +log4j.appender.queryRunStateLog.File=<%= node['bigdata'][:log_dir] %>/queryRunState.log +log4j.appender.queryRunStateLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. +log4j.appender.queryRunStateLog.BufferedIO=false +log4j.appender.queryRunStateLog.layout=org.apache.log4j.PatternLayout +log4j.appender.queryRunStateLog.layout.ConversionPattern=%m Added: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/zoo.cfg.erb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/zoo.cfg.erb (rev 0) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/zoo.cfg.erb 2014-05-16 19:05:43 UTC (rev 8349) @@ -0,0 +1,15 @@ +clientPort=<%= node['zookeeper'][:zoocfg][:clientPort] %> +dataDir=<%= node['zookeeper'][:zoocfg][:dataDir] %> +dataLogDir=<%= node['zookeeper'][:zoocfg][:dataDir] %> +# The number of milliseconds of each tick +tickTime=2000 +# The number of ticks that the initial +# synchronization phase can take +initLimit=10 +# The number of ticks that can pass between +# sending a request and getting an acknowledgement +syncLimit=5 +# the ensemble +server.1=<%= node['bigdata'][:zk_server1] %>:2888:3888 +server.2=<%= node['bigdata'][:zk_server2] %>:2888:3888 +server.3=<%= node['bigdata'][:zk_server3] %>:2888:3888 This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
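The jetty.xml template above wires up a bounded QueuedThreadPool, one HTTP connector built from a shared HttpConfiguration, the bigdata WebAppContext mounted at /bigdata, and a ResourceHandler for the static html/ content. Below is a minimal Java sketch of the same wiring against the embedded Jetty 9.1 API; the class name, directory path, port, and pool sizes are placeholders standing in for the template's node attributes, not actual deployment values.

import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.handler.HandlerCollection;
import org.eclipse.jetty.server.handler.ResourceHandler;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.webapp.WebAppContext;

public class EmbeddedJettySketch {

    public static void main(final String[] args) throws Exception {

        // Bounded thread pool (jetty.threads.min / max / timeout).
        final QueuedThreadPool threadPool = new QueuedThreadPool();
        threadPool.setMinThreads(10);
        threadPool.setMaxThreads(64);
        threadPool.setIdleTimeout(60000);

        final Server server = new Server(threadPool);

        // Shared HTTP semantics for the connector.
        final HttpConfiguration httpConfig = new HttpConfiguration();
        httpConfig.setOutputBufferSize(32768);
        httpConfig.setRequestHeaderSize(8192);
        httpConfig.setResponseHeaderSize(8192);
        httpConfig.setSendDateHeader(false);

        // HTTP endpoint on the jetty.port default.
        final ServerConnector http = new ServerConnector(server,
                new HttpConnectionFactory(httpConfig));
        http.setPort(8080);
        http.setIdleTimeout(30000);
        server.addConnector(http);

        // Placeholder for the expanded webapp directory; the template fills
        // this in from node['bigdata'][:jetty_dir].
        final String jettyDir = "/var/lib/bigdata/var/jetty";

        // The bigdata webapp at /bigdata, deployed unpacked.
        final WebAppContext webapp = new WebAppContext();
        webapp.setResourceBase(jettyDir);
        webapp.setContextPath("/bigdata");
        webapp.setDescriptor(jettyDir + "/WEB-INF/web.xml");
        webapp.setParentLoaderPriority(true);
        webapp.setExtractWAR(false);

        // Static content handler so "/" and html/index.html resolve.
        final ResourceHandler staticContent = new ResourceHandler();
        staticContent.setResourceBase(jettyDir);
        staticContent.setWelcomeFiles(new String[] { "html/index.html" });

        final HandlerCollection handlers = new HandlerCollection();
        handlers.setHandlers(new Handler[] { webapp, staticContent });
        server.setHandler(handlers);

        server.setStopAtShutdown(true);
        server.start();
        server.join();
    }
}

Keeping both handlers in the HandlerCollection mirrors the template's own note that the webapp alone does not resolve http://localhost:8080/ and can fail to deliver some of the static content.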
From: <mrp...@us...> - 2014-05-16 18:36:37
Revision: 8348 http://sourceforge.net/p/bigdata/code/8348 Author: mrpersonick Date: 2014-05-16 18:36:34 +0000 (Fri, 16 May 2014) Log Message: ----------- fixed some Blueprints CI errors related to the 2.5.0 upgrade Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-16 14:59:23 UTC (rev 8347) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-16 18:36:34 UTC (rev 8348) @@ -144,9 +144,11 @@ return null; - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -192,9 +194,11 @@ return props; - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -233,9 +237,11 @@ return properties; - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -297,9 +303,11 @@ cxn().add(uri, prop, val); - } catch (Exception e) { - throw new RuntimeException(e); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -325,6 +333,8 @@ cxn().add(uri, prop, val); + } catch (RuntimeException e) { + throw e; } catch (Exception e) { throw new RuntimeException(e); } @@ -384,9 +394,11 @@ return new BigdataEdge(new StatementImpl(fromURI, edgeURI, toURI), this); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -412,9 +424,11 @@ return new BigdataVertex(uri, this); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -449,9 +463,11 @@ return null; - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -539,8 +555,10 @@ return stmts; - } catch (Exception ex) { - throw new RuntimeException(ex); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); } } @@ -564,9 +582,11 @@ return new EdgeIterable(stmts); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -621,9 +641,11 @@ return new VertexIterable(stmts, subject); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + 
throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -656,9 +678,11 @@ return getEdges(queryStr); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -681,9 +705,11 @@ return null; - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -701,9 +727,11 @@ return new VertexIterable(result, true); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -723,9 +751,11 @@ return new VertexIterable(result, true); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -761,9 +791,11 @@ // remove its properties cxn().remove(uri, wild, wild); - } catch (Exception e) { - throw new RuntimeException(e); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -789,7 +821,9 @@ // remove incoming edges cxn().remove(wild, wild, uri); - } catch (Exception e) { + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { throw new RuntimeException(e); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-16 14:59:23 UTC (rev 8347) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-16 18:36:34 UTC (rev 8348) @@ -29,19 +29,19 @@ /** * This is a thin-client implementation of a Blueprints wrapper around the - * client library that interacts with the NanoSparqlServer. This is a functional + * client library that interacts with the NanoSparqlServer. This is a functional * implementation suitable for writing POCs - it is not a high performance - * implementation by any means (currently does not support caching, batched - * update, or Blueprints query re-writes). Does have a single "bulk upload" - * operation that wraps a method on RemoteRepository that will POST a graphml - * file to the blueprints layer of the bigdata server. + * implementation by any means (currently does not support caching or batched + * update). Does have a single "bulk upload" operation that wraps a method on + * RemoteRepository that will POST a graphml file to the blueprints layer of the + * bigdata server. 
* * @see {@link BigdataSailRemoteRepository} * @see {@link BigdataSailRemoteRepositoryConnection} * @see {@link RemoteRepository} * * @author mikepersonick - * + * */ public class BigdataGraphClient extends BigdataGraph { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java 2014-05-16 14:59:23 UTC (rev 8347) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java 2014-05-16 18:36:34 UTC (rev 8348) @@ -30,6 +30,7 @@ import org.openrdf.model.URI; import org.openrdf.model.ValueFactory; import org.openrdf.model.impl.ValueFactoryImpl; +import org.openrdf.model.vocabulary.RDFS; import com.bigdata.rdf.internal.XSD; import com.tinkerpop.blueprints.Edge; @@ -138,7 +139,19 @@ try { - return vf.createURI(GRAPH_NAMESPACE, URLEncoder.encode(property, "UTF-8")); + if (property.equals("label")) { + + /* + * Label is a reserved property for edge labels, we use + * rdfs:label for that. + */ + return RDFS.LABEL; + + } else { + + return vf.createURI(GRAPH_NAMESPACE, URLEncoder.encode(property, "UTF-8")); + + } } catch (UnsupportedEncodingException e) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java 2014-05-16 14:59:23 UTC (rev 8347) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java 2014-05-16 18:36:34 UTC (rev 8348) @@ -68,7 +68,8 @@ test.doTestSuite(new TransactionalGraphTestSuite(test)); GraphTest.printTestPerformance("TransactionalGraphTestSuite", test.stopWatch()); - } + } + // public void testGraphSuite() throws Exception { // final GraphTest test = newBigdataGraphTest(); // test.stopWatch(); @@ -77,12 +78,12 @@ //} -// public void testTransactionIsolationCommitCheck() throws Exception { +// public void testGetEdgesByLabel() throws Exception { // final BigdataGraphTest test = new BigdataGraphTest(); // test.stopWatch(); // final BigdataTestSuite testSuite = new BigdataTestSuite(test); // try { -// testSuite.testTransactionIsolationCommitCheck(); +// testSuite.testGetEdgesByLabel(); // } finally { // test.shutdown(); // } @@ -95,71 +96,25 @@ super(graphTest); } - public void testTransactionIsolationCommitCheck() throws Exception { - // the purpose of this test is to simulate rexster access to a graph instance, where one thread modifies - // the graph and a separate thread cannot affect the transaction of the first - final TransactionalGraph graph = (TransactionalGraph) graphTest.generateGraph(); - - final CountDownLatch latchCommittedInOtherThread = new CountDownLatch(1); - final CountDownLatch latchCommitInOtherThread = new CountDownLatch(1); - - // this thread starts a transaction then waits while the second thread tries to commit it. 
- final Thread threadTxStarter = new Thread() { - public void run() { - System.err.println(Thread.currentThread().getId() + ": 1"); - final Vertex v = graph.addVertex(null); - - // System.out.println("added vertex"); - - System.err.println(Thread.currentThread().getId() + ": 2"); - latchCommitInOtherThread.countDown(); - - System.err.println(Thread.currentThread().getId() + ": 3"); - try { - latchCommittedInOtherThread.await(); - } catch (InterruptedException ie) { - throw new RuntimeException(ie); - } - - System.err.println(Thread.currentThread().getId() + ": 4"); - graph.rollback(); - - System.err.println(Thread.currentThread().getId() + ": 5"); - // there should be no vertices here - // System.out.println("reading vertex before tx"); - assertFalse(graph.getVertices().iterator().hasNext()); - // System.out.println("read vertex before tx"); - } - }; - - threadTxStarter.start(); - - // this thread tries to commit the transaction started in the first thread above. - final Thread threadTryCommitTx = new Thread() { - public void run() { - System.err.println(Thread.currentThread().getId() + ": 6"); - try { - latchCommitInOtherThread.await(); - } catch (InterruptedException ie) { - throw new RuntimeException(ie); - } - - System.err.println(Thread.currentThread().getId() + ": 7"); - // try to commit the other transaction - graph.commit(); - - System.err.println(Thread.currentThread().getId() + ": 8"); - latchCommittedInOtherThread.countDown(); - System.err.println(Thread.currentThread().getId() + ": 9"); - } - }; - - threadTryCommitTx.start(); - - threadTxStarter.join(); - threadTryCommitTx.join(); - graph.shutdown(); - + public void testGetEdgesByLabel() { + Graph graph = graphTest.generateGraph(); + if (graph.getFeatures().supportsEdgeIteration) { + Vertex v1 = graph.addVertex(null); + Vertex v2 = graph.addVertex(null); + Vertex v3 = graph.addVertex(null); + + Edge e1 = graph.addEdge(null, v1, v2, graphTest.convertLabel("test1")); + Edge e2 = graph.addEdge(null, v2, v3, graphTest.convertLabel("test2")); + Edge e3 = graph.addEdge(null, v3, v1, graphTest.convertLabel("test3")); + + assertEquals(e1, getOnlyElement(graph.query().has("label", graphTest.convertLabel("test1")).edges())); + assertEquals(e2, getOnlyElement(graph.query().has("label", graphTest.convertLabel("test2")).edges())); + assertEquals(e3, getOnlyElement(graph.query().has("label", graphTest.convertLabel("test3")).edges())); + + assertEquals(e1, getOnlyElement(graph.getEdges("label", graphTest.convertLabel("test1")))); + assertEquals(e2, getOnlyElement(graph.getEdges("label", graphTest.convertLabel("test2")))); + assertEquals(e3, getOnlyElement(graph.getEdges("label", graphTest.convertLabel("test3")))); + } } @@ -173,6 +128,7 @@ private class BigdataGraphTest extends GraphTest { private List<String> exclude = Arrays.asList(new String[] { + // this one creates a deadlock, no way around it "testTransactionIsolationCommitCheck" }); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java 2014-05-16 14:59:23 UTC (rev 8347) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java 2014-05-16 18:36:34 UTC (rev 8348) @@ -646,7 +646,7 @@ public void add(final Statement stmt, final Resource... 
c) throws RepositoryException { - log.warn("single statement updates not recommended"); +// log.warn("single statement updates not recommended"); final Graph g = new GraphImpl(); g.add(stmt); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
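The recurring edit in revision 8348 narrows each catch block so unchecked exceptions propagate unchanged and only checked exceptions get wrapped, which keeps the original RuntimeException type visible to callers and to the Blueprints test suite. Read in isolation, the idiom looks like the sketch below; the helper name and the Callable-based signature are illustrative and do not appear in the patch, which applies the two catch clauses inline in each BigdataGraph method.

import java.util.concurrent.Callable;

public class UncheckedSketch {

    /**
     * Run some work, rethrowing unchecked exceptions unchanged and wrapping
     * checked ones exactly once.
     */
    public static <T> T uncheck(final Callable<T> work) {
        try {
            return work.call();
        } catch (RuntimeException e) {
            // Already unchecked: propagate the original exception type.
            throw e;
        } catch (Exception e) {
            // Checked exception from the repository layer: wrap once.
            throw new RuntimeException(e);
        }
    }
}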
From: <tob...@us...> - 2014-05-16 14:59:26
Revision: 8347 http://sourceforge.net/p/bigdata/code/8347 Author: tobycraig Date: 2014-05-16 14:59:23 +0000 (Fri, 16 May 2014) Log Message: ----------- Add angle brackets to URI in explore tab form if they're not present Modified Paths: -------------- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-16 14:06:45 UTC (rev 8346) +++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-16 14:59:23 UTC (rev 8347) @@ -932,8 +932,16 @@ $('#explore-form').submit(function(e) { e.preventDefault(); - var uri = $(this).find('input').val(); + var uri = $(this).find('input').val().trim(); if(uri) { + // add < > if they're not present + if(uri[0] != '<') { + uri = '<' + uri; + } + if(uri.slice(-1) != '>') { + uri += '>'; + } + $(this).find('input').val(uri); loadURI(uri); // if this is a SID, make the components clickable This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
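The workbench change in revision 8347 trims the Explore input and adds the surrounding angle brackets only when they are missing, so a bare URI and a <...>-wrapped one are treated the same. The same normalization rule, written out as a stand-alone Java method purely for illustration (it is not part of the patch), looks like this:

public class UriNormalize {

    /** Trim a URI string and wrap it in angle brackets when they are absent. */
    static String normalize(final String input) {
        String uri = input.trim();
        if (!uri.startsWith("<")) {
            uri = "<" + uri;
        }
        if (!uri.endsWith(">")) {
            uri = uri + ">";
        }
        return uri;
    }

    public static void main(final String[] args) {
        // Both forms normalize to the same bracketed URI.
        System.out.println(normalize("  http://example.org/a "));
        System.out.println(normalize("<http://example.org/a>"));
    }
}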
From: <tob...@us...> - 2014-05-16 14:06:49
Revision: 8346 http://sourceforge.net/p/bigdata/code/8346 Author: tobycraig Date: 2014-05-16 14:06:45 +0000 (Fri, 16 May 2014) Log Message: ----------- FIxed namespace shortcuts not working with CodeMirror editors Modified Paths: -------------- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-15 23:41:06 UTC (rev 8345) +++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-16 14:06:45 UTC (rev 8346) @@ -235,12 +235,12 @@ } $('.namespace-shortcuts li').click(function() { - var textarea = $(this).parents('.tab').find('textarea'); - var current = textarea.val(); + var tab = $(this).parents('.tab').attr('id').split('-')[0]; + var current = EDITORS[tab].getValue(); var ns = $(this).data('ns'); if(current.indexOf(ns) == -1) { - textarea.val(ns + '\n' + current); + EDITORS[tab].setValue(ns + '\n' + current); } }); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-05-15 23:41:10
Revision: 8345 http://sourceforge.net/p/bigdata/code/8345 Author: tobycraig Date: 2014-05-15 23:41:06 +0000 (Thu, 15 May 2014) Log Message: ----------- Increased size of CodeMirror editors Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css 2014-05-15 21:43:20 UTC (rev 8344) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css 2014-05-15 23:41:06 UTC (rev 8345) @@ -228,6 +228,7 @@ .CodeMirror { margin: 5px 0; border: 1px solid #e1e1e1; + font-size: 125%; } .CodeMirror-placeholder { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-05-15 21:43:24
Revision: 8344 http://sourceforge.net/p/bigdata/code/8344 Author: mrpersonick Date: 2014-05-15 21:43:20 +0000 (Thu, 15 May 2014) Log Message: ----------- again, fixed the rexster URL for the fetch-rexster task Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 21:39:21 UTC (rev 8343) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 21:43:20 UTC (rev 8344) @@ -2592,10 +2592,10 @@ <target name="fetch-rexster" depends="prepare,compile,jar"> <echo>Installing Rexster...</echo> <get - src="http://www.tinkerpop.com/downloads/rexster/rexster-console-2.5.0.zip" - dest="${build.dir}/rexster-console-2.5.0.zip"/> - <unzip src="${build.dir}/rexster-console-2.5.0.zip" dest="${build.dir}/"/> - <delete file="${build.dir}/rexster-console-2.5.0.zip"/> + src="http://www.tinkerpop.com/downloads/rexster/rexster-server-2.5.0.zip" + dest="${build.dir}/rexster-server-2.5.0.zip"/> + <unzip src="${build.dir}/rexster-server-2.5.0.zip" dest="${build.dir}/"/> + <delete file="${build.dir}/rexster-server-2.5.0.zip"/> </target> <target name="install-rexster" depends="prepare,compile,jar,bundle"> Deleted: branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml 2014-05-15 21:39:21 UTC (rev 8343) +++ branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml 2014-05-15 21:43:20 UTC (rev 8344) @@ -1,54 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<graphml xmlns="http://graphml.graphdrawing.org/xmlns" - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns - http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> - <key id="weight" for="edge" attr.name="weight" attr.type="float"/> - <key id="name" for="node" attr.name="name" attr.type="string"/> - <key id="age" for="node" attr.name="age" attr.type="int"/> - <key id="lang" for="node" attr.name="lang" attr.type="string"/> - <graph id="G" edgedefault="directed"> - <node id="1"> - <data key="name">marko</data> - <data key="age">29</data> - </node> - <node id="2"> - <data key="name">vadas</data> - <data key="age">27</data> - </node> - <node id="3"> - <data key="name">lop</data> - <data key="lang">java</data> - </node> - <node id="4"> - <data key="name">josh</data> - <data key="age">32</data> - </node> - <node id="5"> - <data key="name">ripple</data> - <data key="lang">java</data> - </node> - <node id="6"> - <data key="name">peter</data> - <data key="age">35</data> - </node> - <edge id="7" source="1" target="2" label="knows"> - <data key="weight">0.5</data> - </edge> - <edge id="8" source="1" target="4" label="knows"> - <data key="weight">1.0</data> - </edge> - <edge id="9" source="1" target="3" label="created"> - <data key="weight">0.4</data> - </edge> - <edge id="10" source="4" target="5" label="created"> - <data key="weight">1.0</data> - </edge> - <edge id="11" source="4" target="3" label="created"> - <data key="weight">0.4</data> - </edge> - <edge id="12" source="6" target="3" label="created"> - <data key="weight">0.2</data> - </edge> - </graph> -</graphml> \ No newline at end of file This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
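The graph-example-1.xml file deleted in revision 8344 is the stock TinkerPop GraphML sample: six vertices carrying name/age/lang properties and six weighted "knows"/"created" edges. For reference, loading GraphML of that shape into a Blueprints Graph goes through GraphMLReader; the sketch below uses an in-memory TinkerGraph so it stays self-contained, and the file path is a placeholder. A BigdataGraph instance could stand in for the TinkerGraph, but nothing here is taken from the commit itself.

import java.io.FileInputStream;
import java.io.InputStream;

import com.tinkerpop.blueprints.Graph;
import com.tinkerpop.blueprints.Vertex;
import com.tinkerpop.blueprints.impls.tg.TinkerGraph;
import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader;

public class GraphMLLoadSketch {

    public static void main(final String[] args) throws Exception {

        // Any Blueprints Graph implementation works here.
        final Graph graph = new TinkerGraph();

        // Placeholder path to the GraphML sample.
        try (InputStream in = new FileInputStream("graph-example-1.xml")) {
            GraphMLReader.inputGraph(graph, in);
        }

        // The sample identifies its vertices through a "name" property.
        for (Vertex v : graph.getVertices()) {
            System.out.println(v.getId() + " -> " + v.getProperty("name"));
        }

        graph.shutdown();
    }
}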
From: <mrp...@us...> - 2014-05-15 21:39:25
Revision: 8343 http://sourceforge.net/p/bigdata/code/8343 Author: mrpersonick Date: 2014-05-15 21:39:21 +0000 (Thu, 15 May 2014) Log Message: ----------- fixed the rexster URL for the fetch-rexster task Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 21:21:54 UTC (rev 8342) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 21:39:21 UTC (rev 8343) @@ -2592,7 +2592,7 @@ <target name="fetch-rexster" depends="prepare,compile,jar"> <echo>Installing Rexster...</echo> <get - src="http://www.tinkerpop.com/downloads/gremlin/rexster-console-2.5.0.zip" + src="http://www.tinkerpop.com/downloads/rexster/rexster-console-2.5.0.zip" dest="${build.dir}/rexster-console-2.5.0.zip"/> <unzip src="${build.dir}/rexster-console-2.5.0.zip" dest="${build.dir}/"/> <delete file="${build.dir}/rexster-console-2.5.0.zip"/> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-05-15 21:22:00
Revision: 8342 http://sourceforge.net/p/bigdata/code/8342 Author: mrpersonick Date: 2014-05-15 21:21:54 +0000 (Thu, 15 May 2014) Log Message: ----------- upgraded to blueprints 2.5.0. added a rexster 2.5.0 dependency. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/.classpath branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java branches/BIGDATA_RELEASE_1_3_0/build.properties branches/BIGDATA_RELEASE_1_3_0/build.xml branches/BIGDATA_RELEASE_1_3_0/pom.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.4.0.jar branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-15 21:21:54 UTC (rev 8342) @@ -94,8 +94,10 @@ <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar" sourcepath="/Users/bryan/Downloads/org.eclipse.jetty.project-jetty-9.1.4.v20140401"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/jackson-core-2.2.3.jar"/> - <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-core-2.4.0.jar"/> - <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-test-2.4.0.jar"/> <classpathentry kind="lib" path="bigdata-blueprints/lib/jettison-1.3.3.jar"/> + <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-core-2.5.0.jar"/> + <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-test-2.5.0.jar"/> + <classpathentry kind="lib" path="bigdata-blueprints/lib/rexster-core-2.5.0.jar"/> + <classpathentry kind="lib" path="bigdata-blueprints/lib/commons-configuration-1.10.jar"/> <classpathentry kind="output" path="bin"/> </classpath> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java 2014-05-15 21:21:54 UTC (rev 8342) @@ -277,6 +277,10 @@ "https://github.com/tinkerpop/blueprints", "https://github.com/tinkerpop/blueprints/blob/master/LICENSE.txt"); + private final static Dep rexsterCore = new Dep("rexster-core", + "https://github.com/tinkerpop/rexster", + 
"https://github.com/tinkerpop/rexster/blob/master/LICENSE.txt"); + static private final Dep[] depends; static { depends = new Dep[] { // @@ -306,6 +310,7 @@ servletApi,// jacksonCore,// blueprintsCore,// + rexsterCore,// bigdataGanglia,// // scale-out jini,// Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt 2014-05-15 21:21:54 UTC (rev 8342) @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt 2014-05-15 21:21:54 UTC (rev 8342) @@ -0,0 +1,24 @@ +Copyright (c) 2009-Infinity, TinkerPop [http://tinkerpop.com] +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the TinkerPop nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL TINKERPOP BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.4.0.jar =================================================================== (Binary files differ) Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar 2014-05-15 21:21:54 UTC (rev 8342) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar =================================================================== (Binary files differ) Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar 2014-05-15 21:21:54 UTC (rev 8342) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar 2014-05-15 21:21:54 UTC (rev 8342) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar =================================================================== --- 
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar 2014-05-15 21:21:54 UTC (rev 8342) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-15 21:21:54 UTC (rev 8342) @@ -1,5 +1,5 @@ /** -Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. Contact: SYSTAP, LLC Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java 2014-05-15 21:21:54 UTC (rev 8342) @@ -0,0 +1,146 @@ +/** +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.blueprints; + +import org.apache.commons.configuration.Configuration; + +import com.tinkerpop.rexster.config.GraphConfiguration; +import com.tinkerpop.rexster.config.GraphConfigurationContext; +import com.tinkerpop.rexster.config.GraphConfigurationException; + +/** + * Create and configure a BigdataGraph for Rexster. + * + * @author mikepersonick + * + */ +public class BigdataGraphConfiguration implements GraphConfiguration { + + public interface Options { + + /** + * Specify the type of bigdata instance to use - embedded or remote. + */ + String TYPE = "properties.type"; + + /** + * Specifies that an embedded bigdata instance should be used. + */ + String TYPE_EMBEDDED = "embedded"; + + /** + * Specifies that a remote bigdata instance should be used. + */ + String TYPE_REMOTE = "remote"; + + /** + * Journal file for an embedded bigdata instance. + */ + String FILE = "properties.file"; + + /** + * Host for a remote bigdata instance. + */ + String HOST = "properties.host"; + + /** + * Port for a remote bigdata instance. 
+ */ + String PORT = "properties.port"; + + } + + /** + * Configure and return a BigdataGraph based on the supplied configuration + * parameters. + * + * @see {@link Options} + * @see com.tinkerpop.rexster.config.GraphConfiguration#configureGraphInstance(com.tinkerpop.rexster.config.GraphConfigurationContext) + */ + @Override + public BigdataGraph configureGraphInstance(final GraphConfigurationContext context) + throws GraphConfigurationException { + + try { + + return configure(context); + + } catch (Exception ex) { + + throw new GraphConfigurationException(ex); + + } + + } + + protected BigdataGraph configure(final GraphConfigurationContext context) + throws Exception { + + final Configuration config = context.getProperties(); + + if (!config.containsKey(Options.TYPE)) { + throw new GraphConfigurationException("missing required parameter: " + Options.TYPE); + } + + final String type = config.getString(Options.TYPE).toLowerCase(); + + if (Options.TYPE_EMBEDDED.equals(type)) { + + if (config.containsKey(Options.FILE)) { + + final String journal = config.getString(Options.FILE); + + return BigdataGraphFactory.create(journal); + + } else { + + return BigdataGraphFactory.create(); + + } + + } else if (Options.TYPE_REMOTE.equals(type)) { + + if (!config.containsKey(Options.HOST)) { + throw new GraphConfigurationException("missing required parameter: " + Options.HOST); + } + + if (!config.containsKey(Options.PORT)) { + throw new GraphConfigurationException("missing required parameter: " + Options.PORT); + } + + final String host = config.getString(Options.HOST); + + final int port = config.getInt(Options.PORT); + + return BigdataGraphFactory.connect(host, port); + + } else { + + throw new GraphConfigurationException("unrecognized value for " + + Options.TYPE + ": " + type); + + } + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml 2014-05-15 21:21:54 UTC (rev 8342) @@ -0,0 +1,113 @@ +<?xml version="1.0" encoding="UTF-8"?> +<rexster> + <http> + <server-port>8182</server-port> + <server-host>0.0.0.0</server-host> + <base-uri>http://localhost</base-uri> + <web-root>public</web-root> + <character-set>UTF-8</character-set> + <enable-jmx>false</enable-jmx> + <enable-doghouse>true</enable-doghouse> + <max-post-size>2097152</max-post-size> + <max-header-size>8192</max-header-size> + <upload-timeout-millis>30000</upload-timeout-millis> + <thread-pool> + <worker> + <core-size>8</core-size> + <max-size>8</max-size> + </worker> + <kernal> + <core-size>4</core-size> + <max-size>4</max-size> + </kernal> + </thread-pool> + <io-strategy>leader-follower</io-strategy> + </http> + <rexpro> + <server-port>8184</server-port> + <server-host>0.0.0.0</server-host> + <session-max-idle>1790000</session-max-idle> + <session-check-interval>3000000</session-check-interval> + <connection-max-idle>180000</connection-max-idle> + <connection-check-interval>3000000</connection-check-interval> + <read-buffer>65536</read-buffer> + <enable-jmx>false</enable-jmx> + <thread-pool> + 
<worker> + <core-size>8</core-size> + <max-size>8</max-size> + </worker> + <kernal> + <core-size>4</core-size> + <max-size>4</max-size> + </kernal> + </thread-pool> + <io-strategy>leader-follower</io-strategy> + </rexpro> + <shutdown-port>8183</shutdown-port> + <shutdown-host>127.0.0.1</shutdown-host> + <config-check-interval>10000</config-check-interval> + <script-engines> + <script-engine> + <name>gremlin-groovy</name> + <reset-threshold>-1</reset-threshold> + <init-scripts>config/init.groovy</init-scripts> + <imports>com.tinkerpop.rexster.client.*</imports> + <static-imports>java.lang.Math.PI</static-imports> + </script-engine> + </script-engines> +<!-- + <security> + <authentication> + <type>none</type> + <configuration> + <users> + <user> + <username>rexster</username> + <password>rexster</password> + </user> + </users> + </configuration> + </authentication> + </security> + <metrics> + <reporter> + <type>jmx</type> + </reporter> + <reporter> + <type>http</type> + </reporter> + <reporter> + <type>console</type> + <properties> + <rates-time-unit>SECONDS</rates-time-unit> + <duration-time-unit>SECONDS</duration-time-unit> + <report-period>10</report-period> + <report-time-unit>MINUTES</report-time-unit> + <includes>http.rest.*</includes> + <excludes>http.rest.*.delete</excludes> + </properties> + </reporter> + </metrics> +--> + <graphs> + <graph> + <graph-name>bigdata</graph-name> + <graph-type>com.bigdata.blueprints.BigdataGraphConfiguration</graph-type> + <properties> +<!-- + <type>embedded</type> + <file>/tmp/bigdata.jnl</file> +--> + <type>remote</type> + <host>localhost</host> + <port>9999</port> + </properties> + <extensions> + <allows> + <allow>tp:gremlin</allow> + </allows> + </extensions> + </graph> + </graphs> +</rexster> Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-05-15 21:21:54 UTC (rev 8342) @@ -72,8 +72,9 @@ ganglia-version=1.0.4 gas-version=0.1.0 jackson-version=2.2.3 -blueprints.version=2.4.0 +blueprints.version=2.5.0 jettison.version=1.3.3 +rexster.version=2.5.0 # Set to false to NOT start services (zookeeper, lookup server, class server, etc). # When false, tests which depend on those services will not run. 
(This can also be Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 21:21:54 UTC (rev 8342) @@ -457,7 +457,7 @@ <include name="**/*.jar" /> </fileset> <fileset dir="${bigdata.dir}/bigdata-blueprints/lib"> - <include name="blueprints-core-${blueprints.version}.jar" /> + <include name="**/*.jar" /> </fileset> <fileset dir="${bigdata.dir}/bigdata-gom/lib"> <include name="**/*.jar" /> @@ -2520,7 +2520,7 @@ <java classname="com.bigdata.rdf.sail.webapp.NanoSparqlServer" failonerror="true" fork="true" logerror="true"> <classpath refid="runtime.classpath" /> <jvmarg value="-server"/> - <jvmarg value="-Xmx1G"/> + <jvmarg value="-Xmx4G"/> <jvmarg value="-Dlog4j.configuration=bigdata-war/src/WEB-INF/classes/log4j.properties"/> <arg value="9999"/> <arg value="kb"/> @@ -2550,36 +2550,8 @@ <include name="semargl-rdf-0.4.jar"/> <include name="semargl-rdfa-0.4.jar"/> <include name="semargl-sesame-0.4.jar"/> - <include name="sesame-http-client-2.7.10.jar"/> - <include name="sesame-http-protocol-2.7.10.jar"/> - <include name="sesame-model-2.7.10.jar"/> - <include name="sesame-query-2.7.10.jar"/> - <include name="sesame-queryalgebra-evaluation-2.7.10.jar"/> - <include name="sesame-queryalgebra-model-2.7.10.jar"/> - <include name="sesame-queryparser-api-2.7.10.jar"/> - <include name="sesame-queryparser-serql-2.7.10.jar"/> - <include name="sesame-queryparser-sparql-2.7.10.jar"/> - <include name="sesame-queryresultio-api-2.7.10.jar"/> - <include name="sesame-queryresultio-sparqlxml-2.7.10.jar"/> - <include name="sesame-repository-api-2.7.10.jar"/> - <include name="sesame-repository-sparql-2.7.10.jar"/> - <include name="sesame-rio-api-2.7.10.jar"/> - <include name="sesame-rio-binary-2.7.10.jar"/> - <include name="sesame-rio-datatypes-2.7.10.jar"/> - <include name="sesame-rio-languages-2.7.10.jar"/> - <include name="sesame-rio-n3-2.7.10.jar"/> - <include name="sesame-rio-nquads-2.7.10.jar"/> - <include name="sesame-rio-ntriples-2.7.10.jar"/> - <include name="sesame-rio-rdfjson-2.7.10.jar"/> - <include name="sesame-rio-rdfxml-2.7.10.jar"/> - <include name="sesame-rio-trig-2.7.10.jar"/> - <include name="sesame-rio-trix-2.7.10.jar"/> - <include name="sesame-rio-turtle-2.7.10.jar"/> - <include name="sesame-sail-api-2.7.10.jar"/> - <include name="sesame-sail-inferencer-2.7.10.jar"/> - <include name="sesame-sail-memory-2.7.10.jar"/> - <include name="sesame-sail-nativerdf-2.7.10.jar"/> - <include name="sesame-util-2.7.10.jar"/> + <include name="sesame-*.jar"/> + <include name="neo4j-*.jar"/> <include name="bigdata-*.jar"/> </fileset> </delete> @@ -2617,4 +2589,63 @@ <target name="gremlin" depends="fetch-gremlin,install-gremlin"> </target> + <target name="fetch-rexster" depends="prepare,compile,jar"> + <echo>Installing Rexster...</echo> + <get + src="http://www.tinkerpop.com/downloads/gremlin/rexster-console-2.5.0.zip" + dest="${build.dir}/rexster-console-2.5.0.zip"/> + <unzip src="${build.dir}/rexster-console-2.5.0.zip" dest="${build.dir}/"/> + <delete file="${build.dir}/rexster-console-2.5.0.zip"/> + </target> + + <target name="install-rexster" depends="prepare,compile,jar,bundle"> + <delete> + <fileset dir="${build.dir}/rexster-server-2.5.0/lib"> + <include name="blueprints-sail-graph-2.5.0.jar"/> + <include name="jsonld-java-0.3.jar"/> + <include name="jsonld-java-sesame-0.3.jar"/> + <include 
name="linked-data-sail-1.1.jar"/> + <include name="repository-sail-1.8.jar"/> + <include name="semargl-core-0.4.jar"/> + <include name="semargl-rdf-0.4.jar"/> + <include name="semargl-rdfa-0.4.jar"/> + <include name="semargl-sesame-0.4.jar"/> + <include name="sesame-*.jar"/> + <include name="neo4j-*.jar"/> + <include name="bigdata-*.jar"/> + </fileset> + </delete> + <copy toDir="${build.dir}/rexster-server-2.5.0/lib" flatten="true"> + <!-- + <fileset dir="${bigdata.dir}/bigdata-rdf/lib"> + <include name="openrdf-sesame-${sesame.version}-onejar.jar" /> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-sails/lib/httpcomponents"> + <include name="httpmime-${apache.httpmime.version}.jar" /> + </fileset> + --> + <fileset dir="${build.dir}/lib"> + <include name="*.jar" /> + </fileset> + <fileset dir="${build.dir}"> + <include name="${version}.jar" /> + </fileset> + </copy> + <copy toDir="${build.dir}/rexster-server-2.5.0/config/" + file="${bigdata.dir}/bigdata-blueprints/src/resources/rexster.xml" + overwrite="true"/> + <chmod file="${build.dir}/rexster-server-2.5.0/bin/rexster.sh" perm="+x"/> + <echo>Rexster installation complete. +0. Make sure the bigdata server is running: + > ant start-bigdata +1. Start the rexster server: + > ./${build.dir}/rexster-server-2.5.0/bin/rexster.sh -s -c ${build.dir}/rexster-server-2.5.0/config/rexster.xml +2. Open the DogHouse UI: http://localhost:8182/doghouse + </echo> + + </target> + + <target name="rexster" depends="fetch-rexster,install-rexster"> + </target> + </project> Added: branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml 2014-05-15 21:21:54 UTC (rev 8342) @@ -0,0 +1,54 @@ +<?xml version="1.0" encoding="UTF-8"?> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + <key id="weight" for="edge" attr.name="weight" attr.type="float"/> + <key id="name" for="node" attr.name="name" attr.type="string"/> + <key id="age" for="node" attr.name="age" attr.type="int"/> + <key id="lang" for="node" attr.name="lang" attr.type="string"/> + <graph id="G" edgedefault="directed"> + <node id="1"> + <data key="name">marko</data> + <data key="age">29</data> + </node> + <node id="2"> + <data key="name">vadas</data> + <data key="age">27</data> + </node> + <node id="3"> + <data key="name">lop</data> + <data key="lang">java</data> + </node> + <node id="4"> + <data key="name">josh</data> + <data key="age">32</data> + </node> + <node id="5"> + <data key="name">ripple</data> + <data key="lang">java</data> + </node> + <node id="6"> + <data key="name">peter</data> + <data key="age">35</data> + </node> + <edge id="7" source="1" target="2" label="knows"> + <data key="weight">0.5</data> + </edge> + <edge id="8" source="1" target="4" label="knows"> + <data key="weight">1.0</data> + </edge> + <edge id="9" source="1" target="3" label="created"> + <data key="weight">0.4</data> + </edge> + <edge id="10" source="4" target="5" label="created"> + <data key="weight">1.0</data> + </edge> + <edge id="11" source="4" target="3" label="created"> + <data key="weight">0.4</data> + </edge> + <edge id="12" source="6" target="3" label="created"> + <data key="weight">0.2</data> + </edge> + </graph> +</graphml> \ No newline at end of file Property 
changes on: branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/pom.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-05-15 21:21:54 UTC (rev 8342) @@ -99,8 +99,9 @@ <lgplutils.version>1.0.7-270114</lgplutils.version> <bigdata.ganglia.version>1.0.4</bigdata.ganglia.version> <jackson.version>2.2.3</jackson.version> - <blueprints.version>2.4.0</blueprints.version> + <blueprints.version>2.5.0</blueprints.version> <jettison.version>1.3.3</jettison.version> + <rexster.version>2.5.0</rexster.version> </properties> <!-- TODO Can we declare the versions of the dependencies here as properties and have them be substituted in for us? Can we pick @@ -321,6 +322,11 @@ <artifactId>blueprints-core</artifactId> <version>${blueprints.version}</version> </dependency> + <dependency> + <groupId>com.tinkerpop.rexster</groupId> + <artifactId>rexster-core</artifactId> + <version>${rexster.version}</version> + </dependency> <!-- --> <!-- artifacts that we publish (because they are not readily --> <!-- available) but we do not maintain. --> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
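With revision 8342 in place, the graph declared in rexster.xml can also be reached directly from Java through the Blueprints API, without going through Rexster. The following is a minimal sketch, not part of the commit: it assumes a bigdata server is already running on localhost:9999 (the host and port configured in rexster.xml above) and that BigdataGraph supports the standard Blueprints Graph operations (addVertex, addEdge, shutdown).

    import com.tinkerpop.blueprints.Vertex;

    import com.bigdata.blueprints.BigdataGraph;
    import com.bigdata.blueprints.BigdataGraphFactory;

    public class RemoteGraphSketch {
        public static void main(final String[] args) throws Exception {
            // Connect to the same remote bigdata instance that rexster.xml points at.
            final BigdataGraph graph = BigdataGraphFactory.connect("localhost", 9999);
            try {
                // Plain Blueprints calls: two vertices and a "knows" edge.
                final Vertex marko = graph.addVertex(null);
                marko.setProperty("name", "marko");
                final Vertex vadas = graph.addVertex(null);
                vadas.setProperty("name", "vadas");
                graph.addEdge(null, marko, vadas, "knows");
            } finally {
                graph.shutdown();
            }
        }
    }

The data written here matches the start of the bundled graph-example-1.xml, so the result can be cross-checked in the Rexster DogHouse UI once the server is installed.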
From: <tho...@us...> - 2014-05-15 19:38:11
|
Revision: 8341 http://sourceforge.net/p/bigdata/code/8341 Author: thompsonbry Date: 2014-05-15 19:38:04 +0000 (Thu, 15 May 2014) Log Message: ----------- Working to chase down a problem with locating bigdata-war/src in the JAR when running the NSS from the command line. Refactored the logic to await the NSS start up to a timeout into the three main invocations of the NSS. This also places the code to interpret jetty.dump.start into each of these code paths in order to provide additional information on the startup contexts. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-15 18:39:35 UTC (rev 8340) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-15 19:38:04 UTC (rev 8341) @@ -29,7 +29,6 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.net.InetSocketAddress; -import java.net.URL; import java.nio.ByteBuffer; import java.nio.channels.ClosedByInterruptException; import java.rmi.Remote; @@ -105,6 +104,7 @@ import com.bigdata.rdf.sail.webapp.ConfigParams; import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet; import com.bigdata.rdf.sail.webapp.NanoSparqlServer; +import com.bigdata.rdf.sail.webapp.NanoSparqlServer.SystemProperties; import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy; import com.bigdata.rwstore.RWStore; import com.bigdata.service.AbstractHATransactionService; @@ -114,7 +114,6 @@ import com.bigdata.util.StackInfoReport; import com.bigdata.util.concurrent.LatchedExecutor; import com.bigdata.util.concurrent.MonitoredFutureTask; -import com.bigdata.util.config.NicUtil; import com.sun.jini.start.LifeCycle; /** @@ -4544,55 +4543,9 @@ jettyServer = NanoSparqlServer .newInstance(jettyXml, journal, null/* initParams */); - log.warn("Starting NSS"); - - // Start the server. - jettyServer.start(); + // Wait until the server starts (up to a timeout). + NanoSparqlServer.awaitServerStart(jettyServer); - if (Boolean.getBoolean("jetty.dump.start")) { - - // Support the jetty dump-after-start semantics. - log.warn(jettyServer.dump()); - - } - - /* - * Report *an* effective URL of this service. - * - * Note: This is an effective local URL (and only one of them, and - * even then only one for the first connector). It does not reflect - * any knowledge about the desired external deployment URL for the - * service end point. 
- */ - final String serviceURL; - { - - final int actualPort = getNSSPort(); -// final int actualPort = jettyServer.getConnectors()[0] -// .getLocalPort(); - - String hostAddr = NicUtil.getIpAddress("default.nic", - "default", true/* loopbackOk */); - - if (hostAddr == null) { - - hostAddr = "localhost"; - - } - - serviceURL = new URL("http", hostAddr, actualPort, ""/* file */) - .toExternalForm(); - - final String msg = "logicalServiceZPath: " - + logicalServiceZPath + "\n" + "serviceURL: " - + serviceURL; - - System.out.println(msg); - if (log.isInfoEnabled()) - log.warn(msg); - - } - } catch (Exception e1) { // Log and ignore. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-15 18:39:35 UTC (rev 8340) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-15 19:38:04 UTC (rev 8341) @@ -115,6 +115,12 @@ String JETTY_STARTUP_TIMEOUT = "jetty.start.timeout"; String DEFAULT_JETTY_STARTUP_TIMEOUT = "10"; + + /** + * When <code>true</code>, the state of jetty will be dumped onto a + * logger after the server start. + */ + String JETTY_DUMP_START = "jetty.dump.start"; } @@ -338,42 +344,12 @@ initParams.put(ConfigParams.SERVLET_CONTEXT_LISTENER_CLASS, servletContextListenerClass); - final long jettyStartTimeout = Long.parseLong(System.getProperty( - SystemProperties.JETTY_STARTUP_TIMEOUT, - SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT)); + // Create the service. + final Server server = NanoSparqlServer.newInstance(port, jettyXml, + null/* indexManager */, initParams); - final Server server = awaitServerStart(port, jettyXml, initParams, - jettyStartTimeout, TimeUnit.SECONDS); + awaitServerStart(server); - /* - * Report *an* effective URL of this service. - * - * Note: This is an effective local URL (and only one of them, and - * even then only one for the first connector). It does not reflect - * any knowledge about the desired external deployment URL for the - * service end point. - */ - final String serviceURL; - { - - final int actualPort = getLocalPort(server); - - String hostAddr = NicUtil.getIpAddress("default.nic", "default", - true/* loopbackOk */); - - if (hostAddr == null) { - - hostAddr = "localhost"; - - } - - serviceURL = new URL("http", hostAddr, actualPort, ""/* file */) - .toExternalForm(); - - System.out.println("serviceURL: " + serviceURL); - - } - // Wait for the service to terminate. server.join(); @@ -382,37 +358,25 @@ /** * Await a {@link Server} start up to a timeout. * - * @param port - * The port (maybe ZERO for a random port). - * @param jettyXml - * The location of the <code>jetty.xml</code> file. - * @param initParams - * The init-param overrides. - * @param timeout - * The timeout. - * @param units - * - * @return The server iff the server started before the timeout. - * + * @parma server The {@link Server} to start. 
* @throws InterruptedException * @throws TimeoutException * @throws Exception */ - private static Server awaitServerStart(final int port, - final String jettyXml, final Map<String, String> initParams, - final long timeout, final TimeUnit units) + public static void awaitServerStart(final Server server) throws InterruptedException, TimeoutException, Exception { - Server server = null; + final long timeout = Long.parseLong(System.getProperty( + SystemProperties.JETTY_STARTUP_TIMEOUT, + SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT)); + boolean ok = false; final long begin = System.nanoTime(); - final long nanos = units.toNanos(timeout); + final long nanos = TimeUnit.SECONDS.toNanos(timeout); long remaining = nanos; try { - // Create the service. - server = NanoSparqlServer.newInstance(port, jettyXml, - null/* indexManager */, initParams); // Start Server. + log.warn("Starting NSS"); server.start(); // Await running. remaining = nanos - (System.nanoTime() - begin); @@ -432,13 +396,59 @@ System.err.println(msg); log.fatal(msg); if (server != null) { + /* + * Support the jetty dump-after-start semantics. + */ + if (Boolean.getBoolean(SystemProperties.JETTY_DUMP_START)) { + log.warn(server.dump()); + } server.stop(); server.destroy(); } } } - return server; + /* + * Support the jetty dump-after-start semantics. + */ + if (Boolean.getBoolean(SystemProperties.JETTY_DUMP_START)) { + log.warn(server.dump()); + } + + /* + * Report *an* effective URL of this service. + * + * Note: This is an effective local URL (and only one of them, and even + * then only one for the first connector). It does not reflect any + * knowledge about the desired external deployment URL for the service + * end point. + */ + final String serviceURL; + { + + final int actualPort = getLocalPort(server); + + String hostAddr = NicUtil.getIpAddress("default.nic", "default", + true/* loopbackOk */); + + if (hostAddr == null) { + + hostAddr = "localhost"; + + } + + serviceURL = new URL("http", hostAddr, actualPort, ""/* file */) + .toExternalForm(); + + final String msg = "serviceURL: " + serviceURL; + + System.out.println(msg); + + if (log.isInfoEnabled()) + log.warn(msg); + + } + } /** @@ -528,9 +538,7 @@ } /** - * Variant used when you already have the {@link IIndexManager} on hand and - * want to use <code>web.xml</code> to configure the {@link WebAppContext} - * and <code>jetty.xml</code> to configure the jetty {@link Server}. + * Variant used when you already have the {@link IIndexManager}. * <p> * When the optional {@link IIndexManager} argument is specified, it will be * set as an attribute on the {@link WebAppContext}. 
This will cause the @@ -563,9 +571,11 @@ * Allow configuration of embedded NSS jetty server using jetty-web.xml * </a> */ - static public Server newInstance(final String jettyXml, - final IIndexManager indexManager, - final Map<String, String> initParams) throws Exception { + static public Server newInstance(// + final String jettyXml,// + final IIndexManager indexManager,// + final Map<String, String> initParams// + ) throws Exception { if (jettyXml == null) throw new IllegalArgumentException(); @@ -676,10 +686,12 @@ */ if (initParams != null) { - wac.setAttribute(BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES, initParams); + wac.setAttribute( + BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES, + initParams); } - + } return server; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java 2014-05-15 18:39:35 UTC (rev 8340) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java 2014-05-15 19:38:04 UTC (rev 8341) @@ -1,6 +1,5 @@ package com.bigdata.samples; -import java.net.URL; import java.util.LinkedHashMap; import java.util.Map; @@ -10,7 +9,6 @@ import com.bigdata.journal.IIndexManager; import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.sail.webapp.NanoSparqlServer; -import com.bigdata.util.config.NicUtil; /** * Class demonstrates how to start the {@link NanoSparqlServer} from within @@ -56,24 +54,8 @@ server = NanoSparqlServer.newInstance(port, indexManager, initParams); - server.start(); + NanoSparqlServer.awaitServerStart(server); - final int actualPort = NanoSparqlServer.getLocalPort(server); - - String hostAddr = NicUtil.getIpAddress("default.nic", - "default", true/* loopbackOk */); - - if (hostAddr == null) { - - hostAddr = "localhost"; - - } - - final String serviceURL = new URL("http", hostAddr, actualPort, ""/* file */) - .toExternalForm(); - - System.out.println("serviceURL: " + serviceURL); - // Block and wait. The NSS is running. server.join(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
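With awaitServerStart() now public, embedded callers follow the same start-and-report path as the command line and the HAJournalServer. A minimal sketch of the resulting embedded usage, mirroring the updated NSSEmbeddedExample; the Journal setup shown is illustrative and not part of this commit:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.Properties;

    import org.eclipse.jetty.server.Server;

    import com.bigdata.journal.IIndexManager;
    import com.bigdata.journal.Journal;
    import com.bigdata.rdf.sail.webapp.NanoSparqlServer;

    public class EmbeddedNssSketch {
        public static void main(final String[] args) throws Exception {
            // Illustrative journal; any IIndexManager already on hand will do.
            final Properties props = new Properties();
            props.setProperty(Journal.Options.FILE, "bigdata.jnl");
            final IIndexManager indexManager = new Journal(props);

            final Map<String, String> initParams = new LinkedHashMap<String, String>();
            final Server server = NanoSparqlServer.newInstance(9999/* port */,
                    indexManager, initParams);

            // Blocks until jetty is running (or the start timeout elapses) and reports
            // the effective serviceURL, so the caller no longer has to compute it.
            NanoSparqlServer.awaitServerStart(server);

            // Block and wait. The NSS is running.
            server.join();
        }
    }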
From: <tho...@us...> - 2014-05-15 18:39:41
|
Revision: 8340 http://sourceforge.net/p/bigdata/code/8340 Author: thompsonbry Date: 2014-05-15 18:39:35 +0000 (Thu, 15 May 2014) Log Message: ----------- Modified startHAServices to pass along environment variables to control the jetty thread pool. Modified jetty.xml to unpack the war per webtide guidence. This only happens if necessary. Modified NanoSparqlServer to detect a failure to start and throw out an exception. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-15 18:38:16 UTC (rev 8339) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-15 18:39:35 UTC (rev 8340) @@ -28,6 +28,8 @@ import java.net.URL; import java.util.LinkedHashMap; import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import javax.servlet.ServletContextListener; @@ -106,6 +108,14 @@ */ String DEFAULT_JETTY_XML = "jetty.xml"; + /** + * The timeout in seconds that we will await the start of the jetty + * {@link Server} (default {@value #DEFAULT_JETTY_START_TIMEOUT}). + */ + String JETTY_STARTUP_TIMEOUT = "jetty.start.timeout"; + + String DEFAULT_JETTY_STARTUP_TIMEOUT = "10"; + } /** @@ -328,26 +338,12 @@ initParams.put(ConfigParams.SERVLET_CONTEXT_LISTENER_CLASS, servletContextListenerClass); - final Server server; + final long jettyStartTimeout = Long.parseLong(System.getProperty( + SystemProperties.JETTY_STARTUP_TIMEOUT, + SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT)); - boolean ok = false; - try { - // Create the service. - server = NanoSparqlServer.newInstance(port, jettyXml, - null/* indexManager */, initParams); - // Start Server. - server.start(); - // Await running. - while (server.isStarting() && !server.isRunning()) { - Thread.sleep(100/* ms */); - } - ok = true; - } finally { - if (!ok) { - // Complain if Server did not start. - System.err.println("Server did not start."); - } - } + final Server server = awaitServerStart(port, jettyXml, initParams, + jettyStartTimeout, TimeUnit.SECONDS); /* * Report *an* effective URL of this service. @@ -384,6 +380,68 @@ } /** + * Await a {@link Server} start up to a timeout. + * + * @param port + * The port (maybe ZERO for a random port). + * @param jettyXml + * The location of the <code>jetty.xml</code> file. + * @param initParams + * The init-param overrides. + * @param timeout + * The timeout. + * @param units + * + * @return The server iff the server started before the timeout. + * + * @throws InterruptedException + * @throws TimeoutException + * @throws Exception + */ + private static Server awaitServerStart(final int port, + final String jettyXml, final Map<String, String> initParams, + final long timeout, final TimeUnit units) + throws InterruptedException, TimeoutException, Exception { + + Server server = null; + boolean ok = false; + final long begin = System.nanoTime(); + final long nanos = units.toNanos(timeout); + long remaining = nanos; + try { + // Create the service. 
+ server = NanoSparqlServer.newInstance(port, jettyXml, + null/* indexManager */, initParams); + // Start Server. + server.start(); + // Await running. + remaining = nanos - (System.nanoTime() - begin); + while (server.isStarting() && !server.isRunning() && remaining > 0) { + Thread.sleep(100/* ms */); + // remaining = nanos - (now - begin) [aka elapsed] + remaining = nanos - (System.nanoTime() - begin); + } + if (remaining < 0) { + throw new TimeoutException(); + } + ok = true; + } finally { + if (!ok) { + // Complain if Server did not start. + final String msg = "Server did not start."; + System.err.println(msg); + log.fatal(msg); + if (server != null) { + server.stop(); + server.destroy(); + } + } + } + return server; + + } + + /** * Start the embedded {@link Server}. * <p> * Note: The port override argument given here is applied by setting the Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-15 18:38:16 UTC (rev 8339) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-15 18:39:35 UTC (rev 8340) @@ -149,7 +149,7 @@ <Set name="contextPath">/bigdata</Set> <Set name="descriptor">WEB-INF/web.xml</Set> <Set name="parentLoaderPriority">true</Set> - <Set name="extractWAR">false</Set> + <Set name="extractWAR">true</Set> </New> </Arg> </Call> Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-05-15 18:38:16 UTC (rev 8339) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-05-15 18:39:35 UTC (rev 8340) @@ -73,6 +73,9 @@ -DHA_PORT=${HA_PORT}\ "-Dcom.bigdata.hostname=${BIGDATA_HOSTNAME}"\ "-Djetty.port=${JETTY_PORT}"\ + "-Djetty.threads.min=${JETTY_THREADS_MIN}"\ + "-Djetty.threads.max=${JETTY_THREADS_MAX}"\ + "-Djetty.threads.timeout=${JETTY_THREADS_TIMEOUT}\" "-Djetty.resourceBase=${JETTY_RESOURCE_BASE}"\ "-DJETTY_XML=${JETTY_XML}"\ -DCOLLECT_QUEUE_STATISTICS=${COLLECT_QUEUE_STATISTICS}\ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
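Where ten seconds is too tight (for example on a first start, now that the WAR is extracted), the timeout can be raised through the new jetty.start.timeout system property before the server is launched. A minimal sketch of a launcher wrapper; the port, namespace and property-file arguments are illustrative and assume the usual NanoSparqlServer command-line convention rather than anything introduced by this commit:

    public class NssLauncherSketch {
        public static void main(final String[] args) throws Exception {
            // Allow jetty up to 60 seconds to come up before the start is reported as
            // failed and the server is stopped (the default is 10 seconds).
            System.setProperty("jetty.start.timeout", "60");
            // Equivalent to passing -Djetty.start.timeout=60 on the java command line.
            com.bigdata.rdf.sail.webapp.NanoSparqlServer.main(new String[] {
                    "9999", "kb", "RWStore.properties" });
        }
    }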
From: <tho...@us...> - 2014-05-15 18:38:19
|
Revision: 8339 http://sourceforge.net/p/bigdata/code/8339 Author: thompsonbry Date: 2014-05-15 18:38:16 +0000 (Thu, 15 May 2014) Log Message: ----------- removed duplicate of gom/src in build.xml Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 18:02:59 UTC (rev 8338) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 18:38:16 UTC (rev 8339) @@ -312,7 +312,6 @@ <fileset dir="${bigdata.dir}/bigdata/src/samples" /> <fileset dir="${bigdata.dir}/bigdata-ganglia/src/java" /> <fileset dir="${bigdata.dir}/bigdata-gas/src/java" /> - <fileset dir="${bigdata.dir}/bigdata-gom/src/java" /> <fileset dir="${bigdata.dir}/bigdata-jini/src/java" /> <fileset dir="${bigdata.dir}/bigdata-rdf/src/java" /> <fileset dir="${bigdata.dir}/bigdata-rdf/src/samples" /> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-05-15 18:03:02
|
Revision: 8338 http://sourceforge.net/p/bigdata/code/8338 Author: tobycraig Date: 2014-05-15 18:02:59 +0000 (Thu, 15 May 2014) Log Message: ----------- Fixed bug with status panel links not working properly Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-05-15 15:26:56 UTC (rev 8337) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-05-15 18:02:59 UTC (rev 8338) @@ -1151,8 +1151,8 @@ function getStatusNumbers(data) { $('#status-text').html(data); - $('#status-text a').eq(1).click(function(e) { e.preventDefault(); showQueries(false); return false; }); - $('#status-text a').eq(2).click(function(e) { e.preventDefault(); showQueries(true); return false; }); + $('#status-text a[href*="status"]').eq(0).click(function(e) { e.preventDefault(); showQueries(false); return false; }); + $('#status-text a[href*="status"]').eq(1).click(function(e) { e.preventDefault(); showQueries(true); return false; }); } $('#show-queries').click(function(e) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-05-15 15:26:58
|
Revision: 8337 http://sourceforge.net/p/bigdata/code/8337 Author: thompsonbry Date: 2014-05-15 15:26:56 +0000 (Thu, 15 May 2014) Log Message: ----------- renamed the http header to X-BIGDATA-MAX-QUERY-MILLIS. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-05-15 15:14:55 UTC (rev 8336) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-05-15 15:26:56 UTC (rev 8337) @@ -217,7 +217,7 @@ * * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) */ - static private final String HTTP_HEADER_BIGDATA_MAX_QUERY_MILLIS = "BIGDATA_MAX_QUERY_MILLIS"; + static private final String HTTP_HEADER_BIGDATA_MAX_QUERY_MILLIS = "X-BIGDATA-MAX-QUERY-MILLIS"; private final SparqlEndpointConfig m_config; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-05-15 15:14:55 UTC (rev 8336) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-05-15 15:26:56 UTC (rev 8337) @@ -192,7 +192,7 @@ * * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) */ - static private final String HTTP_HEADER_BIGDATA_MAX_QUERY_MILLIS = "BIGDATA_MAX_QUERY_MILLIS"; + static private final String HTTP_HEADER_BIGDATA_MAX_QUERY_MILLIS = "X-BIGDATA-MAX-QUERY-MILLIS"; /** * When <code>true</code>, the REST API methods will use the load balancer This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
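Since the header now carries a conventional X- prefix, any HTTP client can impose a per-query deadline without the bigdata client classes. A minimal sketch using only the JDK; it assumes the default /bigdata context path from jetty.xml and the standard SPARQL endpoint at /bigdata/sparql on port 9999:

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.net.URLEncoder;

    public class MaxQueryMillisHeaderSketch {
        public static void main(final String[] args) throws Exception {
            final String query = URLEncoder.encode(
                    "SELECT * WHERE { ?s ?p ?o } LIMIT 10", "UTF-8");
            final URL url = new URL("http://localhost:9999/bigdata/sparql?query=" + query);
            final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            // Ask the server to limit this query to 5000 milliseconds.
            conn.setRequestProperty("X-BIGDATA-MAX-QUERY-MILLIS", "5000");
            conn.setRequestProperty("Accept", "application/sparql-results+xml");
            final InputStream in = conn.getInputStream();
            try {
                // ... consume the SPARQL results ...
            } finally {
                in.close();
            }
        }
    }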
From: <tho...@us...> - 2014-05-15 15:14:58
|
Revision: 8336 http://sourceforge.net/p/bigdata/code/8336 Author: thompsonbry Date: 2014-05-15 15:14:55 +0000 (Thu, 15 May 2014) Log Message: ----------- Added support for setMaxQuery(). This does not yet allow people to (easily) set query timeouts of less than one second (which is the API granularity for openrdf). However, the timeouts ARE communicated in milliseconds using an HTTP header. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/StringUtil.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java 2014-05-15 14:05:03 UTC (rev 8335) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java 2014-05-15 15:14:55 UTC (rev 8336) @@ -33,7 +33,6 @@ import org.apache.http.impl.client.DefaultRedirectStrategy; import org.openrdf.model.ValueFactory; import org.openrdf.repository.Repository; -import org.openrdf.repository.RepositoryConnection; import org.openrdf.repository.RepositoryException; import com.bigdata.rdf.sail.webapp.client.DefaultClientConnectionManagerFactory; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java 2014-05-15 14:05:03 UTC (rev 8335) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java 2014-05-15 15:14:55 UTC (rev 8336) @@ -33,6 +33,7 @@ import java.io.Reader; import java.net.URL; import java.util.Iterator; +import java.util.concurrent.TimeUnit; import org.apache.log4j.Logger; import org.openrdf.model.Graph; @@ -95,16 +96,18 @@ */ public class BigdataSailRemoteRepositoryConnection implements RepositoryConnection { - private static final transient Logger log = Logger.getLogger(BigdataSailRemoteRepositoryConnection.class); + private static final transient Logger log = Logger + .getLogger(BigdataSailRemoteRepositoryConnection.class); + + private final BigdataSailRemoteRepository repo; + + public BigdataSailRemoteRepositoryConnection( + final BigdataSailRemoteRepository repo) { + + this.repo = repo; + + } - private final BigdataSailRemoteRepository repo; - - public BigdataSailRemoteRepositoryConnection(final BigdataSailRemoteRepository repo) { - - this.repo = repo; - - } - public long count(final Resource s, final URI p, final Value o, final Resource... 
c) throws RepositoryException { @@ -124,9 +127,9 @@ } @Override - public RepositoryResult<Statement> getStatements(Resource s, URI p, - Value o, boolean includeInferred, Resource... c) - throws RepositoryException { + public RepositoryResult<Statement> getStatements(final Resource s, + final URI p, final Value o, final boolean includeInferred, + final Resource... c) throws RepositoryException { try { @@ -187,8 +190,9 @@ } @Override - public boolean hasStatement(Resource s, URI p, Value o, - boolean includeInferred, Resource... c) throws RepositoryException { + public boolean hasStatement(final Resource s, final URI p, final Value o, + final boolean includeInferred, final Resource... c) + throws RepositoryException { try { @@ -205,8 +209,9 @@ } @Override - public BooleanQuery prepareBooleanQuery(QueryLanguage ql, String query) - throws RepositoryException, MalformedQueryException { + public BooleanQuery prepareBooleanQuery(final QueryLanguage ql, + final String query) throws RepositoryException, + MalformedQueryException { if (ql != QueryLanguage.SPARQL) { @@ -234,17 +239,32 @@ } } - @Override - public int getMaxQueryTime() { - throw new UnsupportedOperationException(); - } - + /** + * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) + */ + @Override + public int getMaxQueryTime() { + + final long millis = q.getMaxQueryMillis(); + + if (millis == -1) { + // Note: -1L is returned if the http header is not specified. + return -1; + + } + + return (int) TimeUnit.MILLISECONDS.toSeconds(millis); + + } + /** * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) */ @Override - public void setMaxQueryTime(int arg0) { - throw new UnsupportedOperationException(); + public void setMaxQueryTime(final int seconds) { + + q.setMaxQueryMillis(TimeUnit.SECONDS.toMillis(seconds)); + } @Override @@ -298,9 +318,10 @@ } @Override - public BooleanQuery prepareBooleanQuery(QueryLanguage ql, String query, - String baseURI) throws RepositoryException, MalformedQueryException { - + public BooleanQuery prepareBooleanQuery(final QueryLanguage ql, + final String query, final String baseURI) + throws RepositoryException, MalformedQueryException { + if (baseURI != null) throw new UnsupportedOperationException("baseURI not supported"); @@ -309,8 +330,9 @@ } @Override - public GraphQuery prepareGraphQuery(QueryLanguage ql, String query) - throws RepositoryException, MalformedQueryException { + public GraphQuery prepareGraphQuery(final QueryLanguage ql, + final String query) throws RepositoryException, + MalformedQueryException { if (ql != QueryLanguage.SPARQL) { @@ -338,18 +360,35 @@ } } - @Override - public int getMaxQueryTime() { - throw new UnsupportedOperationException(); - } - /** - * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) + * @see http://trac.bigdata.com/ticket/914 (Set timeout on + * remote query) */ - @Override - public void setMaxQueryTime(int arg0) { - throw new UnsupportedOperationException(); - } + @Override + public int getMaxQueryTime() { + + final long millis = q.getMaxQueryMillis(); + + if (millis == -1) { + // Note: -1L is returned if the http header is not specified. 
+ return -1; + + } + + return (int) TimeUnit.MILLISECONDS.toSeconds(millis); + + } + + /** + * @see http://trac.bigdata.com/ticket/914 (Set timeout on + * remote query) + */ + @Override + public void setMaxQueryTime(final int seconds) { + + q.setMaxQueryMillis(TimeUnit.SECONDS.toMillis(seconds)); + + } @Override public void clearBindings() { @@ -408,8 +447,9 @@ } @Override - public GraphQuery prepareGraphQuery(QueryLanguage ql, String query, - String baseURI) throws RepositoryException, MalformedQueryException { + public GraphQuery prepareGraphQuery(final QueryLanguage ql, + final String query, final String baseURI) + throws RepositoryException, MalformedQueryException { if (baseURI != null) throw new UnsupportedOperationException("baseURI not supported."); @@ -419,27 +459,29 @@ } @Override - public Query prepareQuery(QueryLanguage ql, String query) - throws RepositoryException, MalformedQueryException { - + public Query prepareQuery(final QueryLanguage ql, final String query) + throws RepositoryException, MalformedQueryException { + throw new UnsupportedOperationException("please use the specific operation for your query type: prepare[Boolean/Tuple/Graph]Query"); } @Override - public Query prepareQuery(QueryLanguage ql, String query, String baseURI) - throws RepositoryException, MalformedQueryException { + public Query prepareQuery(final QueryLanguage ql, final String query, + final String baseURI) throws RepositoryException, + MalformedQueryException { if (baseURI != null) throw new UnsupportedOperationException("baseURI not supported"); return prepareQuery(ql, query); - } + } - @Override - public TupleQuery prepareTupleQuery(QueryLanguage ql, String query) - throws RepositoryException, MalformedQueryException { + @Override + public TupleQuery prepareTupleQuery(final QueryLanguage ql, + final String query) throws RepositoryException, + MalformedQueryException { if (ql != QueryLanguage.SPARQL) { @@ -466,19 +508,36 @@ throw new QueryEvaluationException(ex); } } - - @Override + + /** + * @see http://trac.bigdata.com/ticket/914 (Set timeout on + * remote query) + */ + @Override public int getMaxQueryTime() { - throw new UnsupportedOperationException(); + + final long millis = q.getMaxQueryMillis(); + + if (millis == -1) { + // Note: -1L is returned if the http header is not specified. + return -1; + + } + + return (int) TimeUnit.MILLISECONDS.toSeconds(millis); + } - /** - * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) - */ - @Override - public void setMaxQueryTime(int arg0) { - throw new UnsupportedOperationException(); - } + /** + * @see http://trac.bigdata.com/ticket/914 (Set timeout on + * remote query) + */ + @Override + public void setMaxQueryTime(final int seconds) { + + q.setMaxQueryMillis(TimeUnit.SECONDS.toMillis(seconds)); + + } @Override public void clearBindings() { @@ -537,8 +596,9 @@ } @Override - public TupleQuery prepareTupleQuery(QueryLanguage ql, String query, - String baseURI) throws RepositoryException, MalformedQueryException { + public TupleQuery prepareTupleQuery(final QueryLanguage ql, + final String query, final String baseURI) + throws RepositoryException, MalformedQueryException { if (baseURI != null) throw new UnsupportedOperationException("baseURI not supported."); @@ -547,17 +607,18 @@ } @Override - public boolean hasStatement(Statement s, boolean includeInferred, Resource... c) - throws RepositoryException { + public boolean hasStatement(final Statement s, + final boolean includeInferred, final Resource... 
c) + throws RepositoryException { return hasStatement(s.getSubject(), s.getPredicate(), s.getObject(), includeInferred, c); } @Override - public <E extends Exception> void add( - Iteration<? extends Statement, E> stmts, Resource... c) - throws RepositoryException, E { + public <E extends Exception> void add( + final Iteration<? extends Statement, E> stmts, final Resource... c) + throws RepositoryException, E { final Graph g = new GraphImpl(); while (stmts.hasNext()) { @@ -569,15 +630,20 @@ } @Override - public void add(Resource s, URI p, Value o, Resource... c) - throws RepositoryException { + public void add(final Resource s, final URI p, final Value o, + final Resource... c) throws RepositoryException { add(new StatementImpl(s, p, o), c); } + /** + * <strong>single statement updates not recommended</strong> + * <p> + * {@inheritDoc} + */ @Override - public void add(Statement stmt, Resource... c) + public void add(final Statement stmt, final Resource... c) throws RepositoryException { log.warn("single statement updates not recommended"); @@ -589,9 +655,9 @@ } - @Override - public void add(Iterable<? extends Statement> stmts, Resource... c) - throws RepositoryException { + @Override + public void add(final Iterable<? extends Statement> stmts, + final Resource... c) throws RepositoryException { final AddOp op = new AddOp(stmts); @@ -603,8 +669,9 @@ * TODO support baseURI */ @Override - public void add(Reader input, String baseURI, RDFFormat format, Resource... c) - throws IOException, RDFParseException, RepositoryException { + public void add(final Reader input, final String baseURI, + final RDFFormat format, final Resource... c) throws IOException, + RDFParseException, RepositoryException { final AddOp op = new AddOp(input, format); @@ -616,8 +683,9 @@ * TODO support baseURI */ @Override - public void add(URL input, String baseURI, RDFFormat format, Resource... c) - throws IOException, RDFParseException, RepositoryException { + public void add(final URL input, final String baseURI, + final RDFFormat format, final Resource... c) throws IOException, + RDFParseException, RepositoryException { final AddOp op = new AddOp(input.toString()); @@ -629,8 +697,9 @@ * TODO support baseURI */ @Override - public void add(File input, String baseURI, RDFFormat format, Resource... c) - throws IOException, RDFParseException, RepositoryException { + public void add(final File input, final String baseURI, + final RDFFormat format, final Resource... c) throws IOException, + RDFParseException, RepositoryException { final AddOp op = new AddOp(input, format); @@ -641,9 +710,10 @@ /** * TODO support baseURI */ - @Override - public void add(InputStream input, String baseURI, RDFFormat format, Resource... c) - throws IOException, RDFParseException, RepositoryException { + @Override + public void add(final InputStream input, final String baseURI, + final RDFFormat format, final Resource... c) throws IOException, + RDFParseException, RepositoryException { final AddOp op = new AddOp(input, format); @@ -671,9 +741,9 @@ } @Override - public <E extends Exception> void remove( - Iteration<? extends Statement, E> stmts, Resource... c) - throws RepositoryException, E { + public <E extends Exception> void remove( + final Iteration<? extends Statement, E> stmts, final Resource... c) + throws RepositoryException, E { final Graph g = new GraphImpl(); while (stmts.hasNext()) @@ -683,8 +753,13 @@ } - @Override - public void remove(Statement stmt, Resource... 
c) + /** + * <strong>single statement updates not recommended</strong> + * <p> + * {@inheritDoc} + */ + @Override + public void remove(final Statement stmt, final Resource... c) throws RepositoryException { log.warn("single statement updates not recommended"); @@ -697,8 +772,8 @@ } @Override - public void remove(Iterable<? extends Statement> stmts, Resource... c) - throws RepositoryException { + public void remove(final Iterable<? extends Statement> stmts, + final Resource... c) throws RepositoryException { final RemoveOp op = new RemoveOp(stmts); @@ -707,8 +782,8 @@ } @Override - public void remove(Resource s, URI p, Value o, Resource... c) - throws RepositoryException { + public void remove(final Resource s, URI p, Value o, final Resource... c) + throws RepositoryException { final RemoveOp op = new RemoveOp(s, p, o, c); @@ -736,11 +811,12 @@ } @Override - public void setAutoCommit(boolean autoCommit) throws RepositoryException { + public void setAutoCommit(final boolean autoCommit) throws RepositoryException { - if (autoCommit == false) - throw new IllegalArgumentException("only auto-commit is currently supported"); - + if (autoCommit == false) + throw new IllegalArgumentException( + "only auto-commit is currently supported"); + } @Override @@ -815,7 +891,7 @@ } @Override - public long size(Resource... c) throws RepositoryException { + public long size(final Resource... c) throws RepositoryException { try { @@ -832,14 +908,14 @@ } @Override - public void clear(Resource... c) throws RepositoryException { + public void clear(final Resource... c) throws RepositoryException { remove(null, null, null, c); } @Override - public void export(RDFHandler handler, Resource... c) + public void export(final RDFHandler handler, final Resource... c) throws RepositoryException, RDFHandlerException { exportStatements(null, null, null, true, handler, c); @@ -888,8 +964,8 @@ @Override - public Update prepareUpdate(QueryLanguage ql, String query) - throws RepositoryException, MalformedQueryException { + public Update prepareUpdate(final QueryLanguage ql, final String query) + throws RepositoryException, MalformedQueryException { if (ql != QueryLanguage.SPARQL) { @@ -967,9 +1043,10 @@ } - @Override - public Update prepareUpdate(QueryLanguage ql, String query, String baseURI) - throws RepositoryException, MalformedQueryException { + @Override + public Update prepareUpdate(final QueryLanguage ql, final String query, + final String baseURI) throws RepositoryException, + MalformedQueryException { if (baseURI != null) throw new UnsupportedOperationException("baseURI not supported"); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-05-15 14:05:03 UTC (rev 8335) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-05-15 15:14:55 UTC (rev 8336) @@ -95,6 +95,7 @@ import com.bigdata.rdf.sail.ISPARQLUpdateListener; import com.bigdata.rdf.sail.SPARQLUpdateEvent; import com.bigdata.rdf.sail.sparql.Bigdata2ASTSPARQLParser; +import com.bigdata.rdf.sail.webapp.client.StringUtil; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.QueryHints; import com.bigdata.rdf.sparql.ast.QueryOptimizerEnum; @@ -211,8 +212,15 @@ */ protected static final String NAMESPACE = "namespace"; - private final 
SparqlEndpointConfig m_config; + /** + * HTTP header may be used to specify the timeout for a query. + * + * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) + */ + static private final String HTTP_HEADER_BIGDATA_MAX_QUERY_MILLIS = "BIGDATA_MAX_QUERY_MILLIS"; + private final SparqlEndpointConfig m_config; + /** * A thread pool for running accepted queries against the * {@link QueryEngine}. @@ -1024,14 +1032,36 @@ */ private AbstractQuery newQuery(final BigdataSailRepositoryConnection cxn) { - final long queryTimeout = getConfig().queryTimeout; - - if (queryTimeout > 0) { + /* + * Establish the query timeout. This may be set in web.xml, which + * overrides all queries and sets a maximum allowed time for query + * execution. This may also be set either via setMaxQuery() or + * setMaxQueryMillis() which set a HTTP header (in milliseconds). + */ + long queryTimeoutMillis = getConfig().queryTimeout; + { + final String s = req + .getHeader(HTTP_HEADER_BIGDATA_MAX_QUERY_MILLIS); + if (s != null) { + long tmp = StringUtil.toLong(s); + if (tmp != -1L && // + (queryTimeoutMillis == 0/* noLimit */ + || // + tmp < queryTimeoutMillis/* shorterLimit */)// + ) { + // Set based on the http header value. + queryTimeoutMillis = tmp; + } + } + } + + if (queryTimeoutMillis > 0) { + final QueryRoot originalQuery = astContainer.getOriginalAST(); - originalQuery.setTimeout(queryTimeout); - + originalQuery.setTimeout(queryTimeoutMillis); + } // final ASTContainer astContainer = ((BigdataParsedQuery) parsedQuery) Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java 2014-05-15 14:05:03 UTC (rev 8335) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java 2014-05-15 15:14:55 UTC (rev 8336) @@ -61,6 +61,27 @@ void setAcceptHeader(String value); /** + * Specify the maximum time in milliseconds that the query will be permitted + * to run. A negative or zero value indicates an unlimited query time (which + * is the default). + * + * @param millis + * The timeout in milliseconds. + * + * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) + */ + void setMaxQueryMillis(long millis); + + /** + * Return the maximum time in milliseconds that the query will be permitted + * to run. A negative or zero value indicates an unlimited query time (which + * is the default). + * + * @return The timeout in milliseceonds. + */ + long getMaxQueryMillis(); + + /** * Return the value of the specified HTTP header. * * @param name Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-05-15 14:05:03 UTC (rev 8335) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-05-15 15:14:55 UTC (rev 8336) @@ -188,6 +188,13 @@ static public final int DEFAULT_MAX_REQUEST_URL_LENGTH = 1000; /** + * HTTP header may be used to specify the timeout for a query. 
+ * + * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) + */ + static private final String HTTP_HEADER_BIGDATA_MAX_QUERY_MILLIS = "BIGDATA_MAX_QUERY_MILLIS"; + + /** * When <code>true</code>, the REST API methods will use the load balancer * aware requestURLs. The load balancer has essentially zero cost when not * using HA, so it is recommended to always specify <code>true</code>. When @@ -1145,8 +1152,38 @@ opts.setHeader(name, value); } + + @Override + public void setMaxQueryMillis(final long timeout) { + + opts.setHeader(HTTP_HEADER_BIGDATA_MAX_QUERY_MILLIS, + Long.toString(timeout)); + + } + /** + * {@inheritDoc} + * <p> + * Note: <code>-1L</code> is returned if the http header is not + * specified. + */ @Override + public long getMaxQueryMillis() { + + final String s = opts + .getHeader(HTTP_HEADER_BIGDATA_MAX_QUERY_MILLIS); + + if (s == null) { + + return -1L; + + } + + return StringUtil.toLong(s); + + } + + @Override public String getHeader(final String name) { return opts.getHeader(name); Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/StringUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/StringUtil.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/StringUtil.java 2014-05-15 15:14:55 UTC (rev 8336) @@ -0,0 +1,67 @@ +// +// ======================================================================== +// Copyright (c) 1995-2014 Mort Bay Consulting Pty. Ltd. +// ------------------------------------------------------------------------ +// All rights reserved. This program and the accompanying materials +// are made available under the terms of the Eclipse Public License v1.0 +// and Apache License v2.0 which accompanies this distribution. +// +// The Eclipse Public License is available at +// http://www.eclipse.org/legal/epl-v10.html +// +// The Apache License v2.0 is available at +// http://www.opensource.org/licenses/apache2.0.php +// +// You may elect to redistribute this code under either of these licenses. +// ======================================================================== +// +/* + * Note: This class was extracted from org.eclipse.jetty.util.StringUtil. + * It contains only those methods that we need that are not already part + * of the general servlet API. (We can not rely on jetty being present + * since the WAR deployment does not bundle the jetty dependencies.) + */ +package com.bigdata.rdf.sail.webapp.client; + +/** Fast String Utilities. +* +* These string utilities provide both convenience methods and +* performance improvements over most standard library versions. The +* main aim of the optimizations is to avoid object creation unless +* absolutely required. +*/ +public class StringUtil { + + /** + * Convert String to an long. Parses up to the first non-numeric character. + * If no number is found an IllegalArgumentException is thrown + * + * @param string + * A String containing an integer. 
+ * @return an int + */ + public static long toLong(String string) { + long val = 0; + boolean started = false; + boolean minus = false; + + for (int i = 0; i < string.length(); i++) { + char b = string.charAt(i); + if (b <= ' ') { + if (started) + break; + } else if (b >= '0' && b <= '9') { + val = val * 10L + (b - '0'); + started = true; + } else if (b == '-' && !started) { + minus = true; + } else + break; + } + + if (started) + return minus ? (-val) : val; + throw new NumberFormatException(string); + } + +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
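On the client side this change means the standard openrdf setMaxQueryTime() call now takes effect against a remote server: the value is converted from seconds to milliseconds and transmitted in the HTTP header described above. A minimal sketch, assuming an already-open BigdataSailRemoteRepositoryConnection is passed in:

    import org.openrdf.query.QueryLanguage;
    import org.openrdf.query.TupleQuery;
    import org.openrdf.query.TupleQueryResult;

    import com.bigdata.rdf.sail.remote.BigdataSailRemoteRepositoryConnection;

    public class MaxQueryTimeSketch {
        public static void runWithDeadline(final BigdataSailRemoteRepositoryConnection cxn)
                throws Exception {
            final TupleQuery query = cxn.prepareTupleQuery(QueryLanguage.SPARQL,
                    "SELECT * WHERE { ?s ?p ?o } LIMIT 10");
            // Seconds, per the openrdf API; sent to the server as a 30000 ms limit.
            query.setMaxQueryTime(30);
            final TupleQueryResult result = query.evaluate();
            try {
                while (result.hasNext()) {
                    System.out.println(result.next());
                }
            } finally {
                result.close();
            }
        }
    }

Note that, as the log message says, timeouts of less than one second cannot be expressed through setMaxQueryTime(); callers that need finer granularity can use setMaxQueryMillis() on the IPreparedQuery returned by RemoteRepository instead.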
From: <mrp...@us...> - 2014-05-15 14:05:06
|
Revision: 8335 http://sourceforge.net/p/bigdata/code/8335 Author: mrpersonick Date: 2014-05-15 14:05:03 +0000 (Thu, 15 May 2014) Log Message: ----------- using a different properties file for starting a Blueprints server Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/GraphStore.properties Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/GraphStore.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/GraphStore.properties (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/GraphStore.properties 2014-05-15 14:05:03 UTC (rev 8335) @@ -0,0 +1,40 @@ +# +# Note: These options are applied when the journal and the triple store are +# first created. + +## +## Journal options. +## + +# The backing file. This contains all your data. You want to put this someplace +# safe. The default locator will wind up in the directory from which you start +# your servlet container. +com.bigdata.journal.AbstractJournal.file=bigdata.jnl + +# The persistence engine. Use 'Disk' for the WORM or 'DiskRW' for the RWStore. +com.bigdata.journal.AbstractJournal.bufferMode=DiskRW + +# Setup for the RWStore recycler rather than session protection. +com.bigdata.service.AbstractTransactionService.minReleaseAge=1 + +com.bigdata.btree.writeRetentionQueue.capacity=4000 +com.bigdata.btree.BTree.branchingFactor=128 + +# 200M initial extent. +com.bigdata.journal.AbstractJournal.initialExtent=209715200 +com.bigdata.journal.AbstractJournal.maximumExtent=209715200 + +## +## Setup for QUADS mode without the full text index. +## +com.bigdata.rdf.sail.truthMaintenance=false +com.bigdata.rdf.store.AbstractTripleStore.quads=false +com.bigdata.rdf.store.AbstractTripleStore.statementIdentifiers=false +com.bigdata.rdf.store.AbstractTripleStore.textIndex=true +com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.NoAxioms + +# Bump up the branching factor for the lexicon indices on the default kb. +com.bigdata.namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor=400 + +# Bump up the branching factor for the statement indices on the default kb. +com.bigdata.namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor=1024 Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/GraphStore.properties ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
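A hypothetical way to point a server instance at the new file; this assumes the usual port / namespace / property-file argument convention of NanoSparqlServer.main(), which is not part of this commit:

    public class GraphStoreLauncherSketch {
        public static void main(final String[] args) throws Exception {
            // Start the REST endpoint that the remote Blueprints client (see rev 8342)
            // talks to, using the Blueprints-oriented configuration added here.
            com.bigdata.rdf.sail.webapp.NanoSparqlServer.main(new String[] {
                    "9999", "kb", "bigdata-war/src/WEB-INF/GraphStore.properties" });
        }
    }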