From: <mrp...@us...> - 2014-03-23 03:26:33
|
Revision: 8007 http://sourceforge.net/p/bigdata/code/8007 Author: mrpersonick Date: 2014-03-23 03:26:21 +0000 (Sun, 23 Mar 2014) Log Message: ----------- rolling back support for IPv4 prefixes Modified Paths: -------------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java 2014-03-22 17:40:56 UTC (rev 8006) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java 2014-03-23 03:26:21 UTC (rev 8007) @@ -39,6 +39,7 @@ import com.bigdata.btree.BytesUtil.UnsignedByteArrayComparator; import com.bigdata.btree.keys.IKeyBuilder; +import com.bigdata.btree.keys.KeyBuilder; import com.bigdata.io.LongPacker; import com.bigdata.rdf.internal.DTE; import com.bigdata.rdf.internal.ILexiconConfiguration; @@ -85,10 +86,10 @@ */ private transient String hostAddress; - /** - * The IPv4 prefix byte. - */ - private transient byte prefix; +// /** +// * The IPv4 prefix byte. +// */ +// private transient byte prefix; /** * The cached byte[] key for the encoding of this IV. @@ -102,7 +103,7 @@ public IV<V, InetAddress> clone(final boolean clearCache) { - final IPAddrIV<V> tmp = new IPAddrIV<V>(value, prefix); + final IPAddrIV<V> tmp = new IPAddrIV<V>(value);//, prefix); // Propagate the cached byte[] key. tmp.key = key; @@ -123,7 +124,7 @@ /** * Ctor with internal value specified. */ - public IPAddrIV(final InetAddress value, final byte prefix) { + public IPAddrIV(final InetAddress value) {//, final byte prefix) { /* * TODO Using XSDBoolean so that we can know how to decode this thing @@ -133,7 +134,7 @@ this.value = value; - this.prefix = prefix; +// this.prefix = prefix; } @@ -168,11 +169,11 @@ this.value = InetAddress.getByName(ip); - final String suffix = matcher.group(4); +// final String suffix = matcher.group(4); // log.debug(suffix); - this.prefix = suffix != null ? Byte.valueOf(suffix) : (byte) 33; +// this.prefix = suffix != null ? Byte.valueOf(suffix) : (byte) 33; } else { @@ -181,8 +182,6 @@ // log.debug("no match"); } - - } @@ -248,11 +247,11 @@ public String getLocalName() { if (hostAddress == null) { - if (prefix < 33) { - hostAddress = value.getHostAddress() + "/" + prefix; - } else { +// if (prefix < 33) { +// hostAddress = value.getHostAddress() + "/" + prefix; +// } else { hostAddress = value.getHostAddress(); - } +// } } return hostAddress; @@ -266,8 +265,8 @@ return true; if (o instanceof IPAddrIV) { final InetAddress value2 = ((IPAddrIV<?>) o).value; - final byte prefix2 = ((IPAddrIV<?>) o).prefix; - return value.equals(value2) && prefix == prefix2; +// final byte prefix2 = ((IPAddrIV<?>) o).prefix; + return value.equals(value2);// && prefix == prefix2; } return false; } @@ -294,7 +293,7 @@ // First emit the flags byte. keyBuilder.appendSigned(flags()); - // Then append the InetAddress byte[]. + // Then append the InetAddress byte[] and the prefix. 
keyBuilder.append(key()); return keyBuilder; @@ -305,12 +304,16 @@ if (key == null) { - key = new byte[5]; +// final IKeyBuilder kb = KeyBuilder.newInstance(); +// +// kb.append(value.getAddress()); +// +// kb.append(prefix); +// +// key = kb.getKey(); + + key = value.getAddress(); - System.arraycopy(value.getAddress(), 0, key, 0, 4); - - key[4] = prefix; - } return key; @@ -358,13 +361,9 @@ try { - final byte[] ip = new byte[4]; - - System.arraycopy(key, 0, ip, 0, 4); - - final InetAddress value = InetAddress.getByAddress(ip); + final InetAddress value = InetAddress.getByAddress(key); - return new IPAddrIV(value, key[5]); + return new IPAddrIV(value); } catch (UnknownHostException ex) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
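With the prefix byte rolled back, the unsigned key for an IPAddrIV is again just the raw address bytes returned by InetAddress.getAddress(), and decoding hands those bytes straight back to InetAddress.getByAddress(). A minimal sketch of that round trip using only the JDK follows; the class and method names are illustrative, not bigdata API.

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;

// Sketch of the key round trip after r8007: the cached key is just the
// address bytes (4 for IPv4), with no trailing prefix byte.
public class IpKeyRoundTrip {

    // Encode: the key is simply the raw address bytes.
    static byte[] encode(final InetAddress value) {
        return value.getAddress();
    }

    // Decode: hand the same bytes straight back to InetAddress.
    static InetAddress decode(final byte[] key) throws UnknownHostException {
        return InetAddress.getByAddress(key);
    }

    public static void main(final String[] args) throws Exception {
        final InetAddress ip = InetAddress.getByName("10.1.2.3");
        final byte[] key = encode(ip);
        System.out.println(Arrays.toString(key));          // [10, 1, 2, 3]
        System.out.println(decode(key).getHostAddress());  // 10.1.2.3
    }
}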
From: <mrp...@us...> - 2014-03-22 17:40:59
|
Revision: 8006 http://sourceforge.net/p/bigdata/code/8006 Author: mrpersonick Date: 2014-03-22 17:40:56 +0000 (Sat, 22 Mar 2014) Log Message: ----------- added support for IPv4 prefixes Modified Paths: -------------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2014-03-22 11:06:14 UTC (rev 8005) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2014-03-22 17:40:56 UTC (rev 8006) @@ -548,7 +548,7 @@ final byte[] addr = new byte[4]; System.arraycopy(key, o, addr, 0, 4); final InetAddress ip = InetAddress.getByAddress(addr); - return new IPAddrIV(ip); + return new IPAddrIV(ip, key[o+4]); } catch (UnknownHostException ex) { throw new RuntimeException(ex); } Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java 2014-03-22 11:06:14 UTC (rev 8005) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java 2014-03-22 17:40:56 UTC (rev 8006) @@ -31,6 +31,8 @@ import java.io.Serializable; import java.net.InetAddress; import java.net.UnknownHostException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import org.openrdf.model.URI; import org.openrdf.model.Value; @@ -84,6 +86,11 @@ private transient String hostAddress; /** + * The IPv4 prefix byte. + */ + private transient byte prefix; + + /** * The cached byte[] key for the encoding of this IV. */ private transient byte[] key; @@ -95,7 +102,7 @@ public IV<V, InetAddress> clone(final boolean clearCache) { - final IPAddrIV<V> tmp = new IPAddrIV<V>(value); + final IPAddrIV<V> tmp = new IPAddrIV<V>(value, prefix); // Propagate the cached byte[] key. tmp.key = key; @@ -116,7 +123,7 @@ /** * Ctor with internal value specified. */ - public IPAddrIV(final InetAddress value) { + public IPAddrIV(final InetAddress value, final byte prefix) { /* * TODO Using XSDBoolean so that we can know how to decode this thing @@ -126,8 +133,16 @@ this.value = value; + this.prefix = prefix; + } + /* + * Somebody please fix this for the love of god. + */ + public static final Pattern pattern = + Pattern.compile("((?:[0-9]{1,3}\\.){3}[0-9]{1,3})((\\/)(([0-9]{1,2})))?"); + /** * Ctor with host address specified. */ @@ -139,9 +154,36 @@ */ super(VTE.URI, DTE.XSDBoolean); - this.value = InetAddress.getByName(hostAddress); this.hostAddress = hostAddress; + final Matcher matcher = pattern.matcher(hostAddress); + + final boolean matches = matcher.matches(); + + if (matches) { + + final String ip = matcher.group(1); + +// log.debug(ip); + + this.value = InetAddress.getByName(ip); + + final String suffix = matcher.group(4); + +// log.debug(suffix); + + this.prefix = suffix != null ? 
Byte.valueOf(suffix) : (byte) 33; + + } else { + + throw new IllegalArgumentException("not an IP: " + hostAddress); + +// log.debug("no match"); + + } + + + } /** @@ -205,7 +247,13 @@ @Override public String getLocalName() { if (hostAddress == null) { - hostAddress = value.getHostAddress(); + + if (prefix < 33) { + hostAddress = value.getHostAddress() + "/" + prefix; + } else { + hostAddress = value.getHostAddress(); + } + } return hostAddress; } @@ -218,7 +266,8 @@ return true; if (o instanceof IPAddrIV) { final InetAddress value2 = ((IPAddrIV<?>) o).value; - return value.equals(value2); + final byte prefix2 = ((IPAddrIV<?>) o).prefix; + return value.equals(value2) && prefix == prefix2; } return false; } @@ -256,8 +305,12 @@ if (key == null) { - key = value.getAddress(); + key = new byte[5]; + + System.arraycopy(value.getAddress(), 0, key, 0, 4); + key[4] = prefix; + } return key; @@ -305,9 +358,13 @@ try { - final InetAddress value = InetAddress.getByAddress(key); + final byte[] ip = new byte[4]; + + System.arraycopy(key, 0, ip, 0, 4); + + final InetAddress value = InetAddress.getByAddress(ip); - return new IPAddrIV(value); + return new IPAddrIV(value, key[5]); } catch (UnknownHostException ex) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
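For reference, the scheme this commit introduced (and which r8007 above rolls back) can be sketched in isolation: the regular expression splits an optional "/nn" suffix off the dotted quad, a missing suffix defaults to (byte) 33, and the key is the four address bytes followed by the prefix byte. The pattern string and key layout are taken from the diff; the surrounding class is a made-up standalone illustration, not the bigdata IPAddrIV.

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Standalone sketch of the r8006 scheme: parse "a.b.c.d[/nn]" and build a
// 5-byte key = 4 address bytes + 1 prefix byte (33 means "no prefix given").
public class IpPrefixKeySketch {

    // Pattern copied from the commit: group(1) is the dotted quad,
    // group(4) is the optional prefix length.
    static final Pattern PATTERN =
            Pattern.compile("((?:[0-9]{1,3}\\.){3}[0-9]{1,3})((\\/)(([0-9]{1,2})))?");

    static byte[] toKey(final String hostAddress) throws UnknownHostException {
        final Matcher m = PATTERN.matcher(hostAddress);
        if (!m.matches())
            throw new IllegalArgumentException("not an IP: " + hostAddress);
        final InetAddress value = InetAddress.getByName(m.group(1));
        final String suffix = m.group(4);
        final byte prefix = suffix != null ? Byte.valueOf(suffix) : (byte) 33;
        final byte[] key = new byte[5];
        System.arraycopy(value.getAddress(), 0, key, 0, 4);
        key[4] = prefix;
        return key;
    }

    public static void main(final String[] args) throws Exception {
        System.out.println(Arrays.toString(toKey("192.168.1.0/24"))); // [-64, -88, 1, 0, 24]
        System.out.println(Arrays.toString(toKey("192.168.1.7")));    // [-64, -88, 1, 7, 33]
    }
}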
From: <tho...@us...> - 2014-03-22 11:06:22
|
Revision: 8005
http://sourceforge.net/p/bigdata/code/8005
Author:   thompsonbry
Date:     2014-03-22 11:06:14 +0000 (Sat, 22 Mar 2014)

Log Message:
-----------
javadoc on the semantics of interrupting (a) the submission of a query; and (b) a running query. See #864 and #707.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java	2014-03-21 01:21:03 UTC (rev 8004)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java	2014-03-22 11:06:14 UTC (rev 8005)
@@ -63,6 +63,7 @@
 import com.bigdata.bop.PipelineOp;
 import com.bigdata.bop.bindingSet.ListBindingSet;
 import com.bigdata.bop.engine.IRunningQuery;
+import com.bigdata.bop.engine.QueryEngine;
 import com.bigdata.bop.rdf.join.ChunkedMaterializationIterator;
 import com.bigdata.journal.TimestampUtility;
 import com.bigdata.rdf.internal.IV;
@@ -659,6 +660,38 @@
 
     /**
      * Evaluate a query plan (core method).
+     * <p>
+     * As explained in some depth at <a
+     * href="https://sourceforge.net/apps/trac/bigdata/ticket/707">
+     * BlockingBuffer.close() does not unblock threads </a> and <a
+     * href="http://trac.bigdata.com/ticket/864"> Semantics of interrupting a
+     * running query</a>, (a) you can not interrupted the thread that submits a
+     * query until the {@link CloseableIteration} has been returned to the
+     * caller submitting that query; (b)
+     * <p>
+     * (a) If you interrupt the thread submitting the query, the query may
+     * actually execute. This can occur because the interrupt can arise between
+     * the time at which the query begins to execute on the {@link QueryEngine}
+     * and the time at which the {@link IRunningQuery} object is bound up inside
+     * of the returned {@link CloseableIteration} and returned to the caller.
+     * Until the caller has possession of the {@link CloseableIteration}, an
+     * interrupt will not cause the associated {@link IRunningQuery} to be
+     * terminated. See <a
+     * href="https://sourceforge.net/apps/trac/bigdata/ticket/707">
+     * BlockingBuffer.close() does not unblock threads </a>
+     * <p>
+     * (b) If you interrupt the thread draining the solutions from the
+     * {@link CloseableIteration} or otherwise cause
+     * {@link CloseableIteration#close()} to become invoked, then the
+     * {@link IRunningQuery} will be interrupted. Per <a
+     * href="http://trac.bigdata.com/ticket/864"> Semantics of interrupting a
+     * running query</a>, that interrupt is interpreted as <em>normal</em>
+     * termination (this supports the use case of LIMIT and is built deeply into
+     * the {@link QueryEngine} semantics). In order for the application to
+     * distinguish between a case where it has interrupted the query and a case
+     * where the query has been interrupted by a LIMIT, the application MUST
+     * notice when it decides to interrupt a given query and then discard the
+     * outcome of that query.
      *
      * @param astContainer
      *            The query model.
@@ -682,6 +715,11 @@
      *         containing the solutions for the query.
      *
      * @throws QueryEvaluationException
+     *
+     * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/707">
+     *      BlockingBuffer.close() does not unblock threads </a>
+     * @see <a href="http://trac.bigdata.com/ticket/864"> Semantics of
+     *      interrupting a running query</a>
      */
     static //private Note: Exposed to CBD class.
     CloseableIteration<BindingSet, QueryEvaluationException> evaluateQuery(
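The practical upshot of (b) for application code is that the returned CloseableIteration must always be closed: close() is what interrupts the underlying IRunningQuery, and that interrupt is treated as normal termination. A minimal consumer-side sketch of that contract follows, assuming the Sesame 2.x CloseableIteration returned by ASTEvalHelper; the helper class and method here are illustrative only.

import info.aduna.iteration.CloseableIteration;
import org.openrdf.query.BindingSet;
import org.openrdf.query.QueryEvaluationException;

// Consumer-side sketch of the contract documented in r8005: always close()
// the iteration in a finally block. close() interrupts the running query,
// and that interrupt is interpreted as normal termination (the LIMIT case).
public class DrainAndClose {

    static long drain(final CloseableIteration<BindingSet, QueryEvaluationException> itr)
            throws QueryEvaluationException {
        long n = 0;
        try {
            while (itr.hasNext()) {
                final BindingSet bs = itr.next();
                n++; // real code would consume bs here
            }
            return n;
        } finally {
            // Guarantees the IRunningQuery is terminated even if draining
            // stops early or an exception is thrown.
            itr.close();
        }
    }
}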
From: <tho...@us...> - 2014-03-21 01:21:05
|
Revision: 8004
http://sourceforge.net/p/bigdata/code/8004
Author:   thompsonbry
Date:     2014-03-21 01:21:03 +0000 (Fri, 21 Mar 2014)

Log Message:
-----------
Fixing the .classpath for the eclipse IDE to include the jackson jar.

Modified Paths:
--------------
    branches/RDR/.classpath

Modified: branches/RDR/.classpath
===================================================================
--- branches/RDR/.classpath	2014-03-21 01:12:03 UTC (rev 8003)
+++ branches/RDR/.classpath	2014-03-21 01:21:03 UTC (rev 8004)
@@ -59,6 +59,7 @@
 	<classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-analyzers-3.0.0.jar"/>
 	<classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-core-3.0.0.jar"/>
 	<classpathentry kind="lib" path="bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar" sourcepath="/Users/bryan/Documents/workspace/org.eclipse.jetty.project-jetty-9.1-wsbatch"/>
+	<classpathentry kind="lib" path="bigdata-sails/lib/jackson-core-2.3.3-20140314.203554-3.jar"/>
 	<classpathentry exported="true" kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
 	<classpathentry exported="true" kind="lib" path="bigdata/lib/high-scale-lib-v1.1.2.jar"/>
 	<classpathentry exported="true" kind="lib" path="bigdata/lib/junit-ext-1.1-b3-dev.jar"/>
From: <tho...@us...> - 2014-03-21 01:12:06
|
Revision: 8003
http://sourceforge.net/p/bigdata/code/8003
Author:   thompsonbry
Date:     2014-03-21 01:12:03 +0000 (Fri, 21 Mar 2014)

Log Message:
-----------
Tagging the 1.2.5 release.

Added Paths:
-----------
    tags/BIGDATA_RELEASE_1_2_5/
From: <tho...@us...> - 2014-03-21 01:11:12
|
Revision: 8002 http://sourceforge.net/p/bigdata/code/8002 Author: thompsonbry Date: 2014-03-21 01:11:10 +0000 (Fri, 21 Mar 2014) Log Message: ----------- Publishing the 1.2.5 release. This addresses a durable memory leak in the journal and provides eager failure for a problem where the childAddr of a B+Tree node is 0L (null). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_4/build.properties Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_4/bigdata/src/releases/RELEASE_1_2_5.txt Added: branches/BIGDATA_RELEASE_1_2_4/bigdata/src/releases/RELEASE_1_2_5.txt =================================================================== --- branches/BIGDATA_RELEASE_1_2_4/bigdata/src/releases/RELEASE_1_2_5.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_2_4/bigdata/src/releases/RELEASE_1_2_5.txt 2014-03-21 01:11:10 UTC (rev 8002) @@ -0,0 +1,304 @@ +This is a minor release of bigdata(R). + +Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF capable of loading 1B triples in under one hour on a 15 node cluster. Bigdata operates in both a single machine mode (Journal) and a cluster mode (Federation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation. + +Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the Federation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput. + +See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and [3,5,6] for news, questions, and the latest developments. For more information about SYSTAP, LLC and bigdata, see [7]. + +Starting with the 1.0.0 release, we offer a WAR artifact [8] for easy installation of the single machine RDF database. For custom development and cluster installations we recommend checking out the code from SVN using the tag for this release. The code will build automatically under eclipse. You can also build the code using the ant script. The cluster installer requires the use of the ant script. + +You can download the WAR from: + +http://sourceforge.net/projects/bigdata/ + +You can checkout this release from: + +https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_2_5 + +New features: + +- SPARQL 1.1 Update Extensions (SPARQL UPDATE for named solution sets). See https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=SPARQL_Update for more information. +- SPARQL 1.1 Property Paths. +- Remote Java client for Multi-Tenancy extensions NanoSparqlServer +- Sesame 2.6.10 dependency +- Plus numerous other bug fixes and performance enhancements. 
+ +Feature summary: + +- Single machine data storage to ~50B triples/quads (RWStore); +- Clustered data storage is essentially unlimited; +- Simple embedded and/or webapp deployment (NanoSparqlServer); +- Triples, quads, or triples with provenance (SIDs); +- Fast RDFS+ inference and truth maintenance; +- Fast 100% native SPARQL 1.1 evaluation; +- Integrated "analytic" query package; +- %100 Java memory manager leverages the JVM native heap (no GC); + +Road map [3]: + +- High availability for the journal and the cluster. +- Runtime Query Optimizer for Analytic Query mode; and +- Simplified deployment, configuration, and administration for clusters. + +Change log: + + Note: Versions with (*) MAY require data migration. For details, see [9]. + +1.2.5: + +- http://trac.bigdata.com/ticket/817 (Unexplained increase in journal size) +- http://trac.bigdata.com/ticket/855 (Work on "AssertionError: Child does not have persistent identity") + +1.2.4: + +- http://sourceforge.net/apps/trac/bigdata/ticket/777 (ConcurrentModificationException in ASTComplexOptionalOptimizer) + +1.2.3: + +- http://sourceforge.net/apps/trac/bigdata/ticket/168 (Maven Build) +- http://sourceforge.net/apps/trac/bigdata/ticket/196 (Journal leaks memory). +- http://sourceforge.net/apps/trac/bigdata/ticket/235 (Occasional deadlock in CI runs in com.bigdata.io.writecache.TestAll) +- http://sourceforge.net/apps/trac/bigdata/ticket/312 (CI (mock) quorums deadlock) +- http://sourceforge.net/apps/trac/bigdata/ticket/405 (Optimize hash join for subgroups with no incoming bound vars.) +- http://sourceforge.net/apps/trac/bigdata/ticket/412 (StaticAnalysis#getDefinitelyBound() ignores exogenous variables.) +- http://sourceforge.net/apps/trac/bigdata/ticket/485 (RDFS Plus Profile) +- http://sourceforge.net/apps/trac/bigdata/ticket/495 (SPARQL 1.1 Property Paths) +- http://sourceforge.net/apps/trac/bigdata/ticket/519 (Negative parser tests) +- http://sourceforge.net/apps/trac/bigdata/ticket/531 (SPARQL UPDATE for SOLUTION SETS) +- http://sourceforge.net/apps/trac/bigdata/ticket/535 (Optimize JOIN VARS for Sub-Selects) +- http://sourceforge.net/apps/trac/bigdata/ticket/555 (Support PSOutputStream/InputStream at IRawStore) +- http://sourceforge.net/apps/trac/bigdata/ticket/559 (Use RDFFormat.NQUADS as the format identifier for the NQuads parser) +- http://sourceforge.net/apps/trac/bigdata/ticket/570 (MemoryManager Journal does not implement all methods). +- http://sourceforge.net/apps/trac/bigdata/ticket/575 (NSS Admin API) +- http://sourceforge.net/apps/trac/bigdata/ticket/577 (DESCRIBE with OFFSET/LIMIT needs to use sub-select) +- http://sourceforge.net/apps/trac/bigdata/ticket/578 (Concise Bounded Description (CBD)) +- http://sourceforge.net/apps/trac/bigdata/ticket/579 (CONSTRUCT should use distinct SPO filter) +- http://sourceforge.net/apps/trac/bigdata/ticket/583 (VoID in ServiceDescription) +- http://sourceforge.net/apps/trac/bigdata/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/590 (nxparser fails with uppercase language tag) +- http://sourceforge.net/apps/trac/bigdata/ticket/592 (Optimize RWStore allocator sizes) +- http://sourceforge.net/apps/trac/bigdata/ticket/593 (Ugrade to Sesame 2.6.10) +- http://sourceforge.net/apps/trac/bigdata/ticket/594 (WAR was deployed using TRIPLES rather than QUADS by default) +- http://sourceforge.net/apps/trac/bigdata/ticket/596 (Change web.xml parameter names to be consistent with Jini/River) +- http://sourceforge.net/apps/trac/bigdata/ticket/597 (SPARQL UPDATE LISTENER) +- http://sourceforge.net/apps/trac/bigdata/ticket/598 (B+Tree branching factor and HTree addressBits are confused in their NodeSerializer implementations) +- http://sourceforge.net/apps/trac/bigdata/ticket/599 (BlobIV for blank node : NotMaterializedException) +- http://sourceforge.net/apps/trac/bigdata/ticket/600 (BlobIV collision counter hits false limit.) +- http://sourceforge.net/apps/trac/bigdata/ticket/601 (Log uncaught exceptions) +- http://sourceforge.net/apps/trac/bigdata/ticket/602 (RWStore does not discard logged deletes on reset()) +- http://sourceforge.net/apps/trac/bigdata/ticket/607 (History service / index) +- http://sourceforge.net/apps/trac/bigdata/ticket/608 (LOG BlockingBuffer not progressing at INFO or lower level) +- http://sourceforge.net/apps/trac/bigdata/ticket/609 (bigdata-ganglia is required dependency for Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/611 (The code that processes SPARQL Update has a typo) +- http://sourceforge.net/apps/trac/bigdata/ticket/612 (Bigdata scale-up depends on zookeper) +- http://sourceforge.net/apps/trac/bigdata/ticket/613 (SPARQL UPDATE response inlines large DELETE or INSERT triple graphs) +- http://sourceforge.net/apps/trac/bigdata/ticket/614 (static join optimizer does not get ordering right when multiple tails share vars with ancestry) +- http://sourceforge.net/apps/trac/bigdata/ticket/615 (AST2BOpUtility wraps UNION with an unnecessary hash join) +- http://sourceforge.net/apps/trac/bigdata/ticket/616 (Row store read/update not isolated on Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/617 (Concurrent KB create fails with "No axioms defined?") +- http://sourceforge.net/apps/trac/bigdata/ticket/618 (DirectBufferPool.poolCapacity maximum of 2GB) +- http://sourceforge.net/apps/trac/bigdata/ticket/619 (RemoteRepository class should use application/x-www-form-urlencoded for large POST requests) +- http://sourceforge.net/apps/trac/bigdata/ticket/620 (UpdateServlet fails to parse MIMEType when doing conneg.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/626 (Expose performance counters for read-only indices) +- http://sourceforge.net/apps/trac/bigdata/ticket/627 (Environment variable override for NSS properties file) +- http://sourceforge.net/apps/trac/bigdata/ticket/628 (Create a bigdata-client jar for the NSS REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/631 (ClassCastException in SIDs mode query) +- http://sourceforge.net/apps/trac/bigdata/ticket/632 (NotMaterializedException when a SERVICE call needs variables that are provided as query input bindings) +- http://sourceforge.net/apps/trac/bigdata/ticket/633 (ClassCastException when binding non-uri values to a variable that occurs in predicate position) +- http://sourceforge.net/apps/trac/bigdata/ticket/638 (Change DEFAULT_MIN_RELEASE_AGE to 1ms) +- http://sourceforge.net/apps/trac/bigdata/ticket/640 (Conditionally rollback() BigdataSailConnection if dirty) +- http://sourceforge.net/apps/trac/bigdata/ticket/642 (Property paths do not work inside of exists/not exists filters) +- http://sourceforge.net/apps/trac/bigdata/ticket/643 (Add web.xml parameters to lock down public NSS end points) +- http://sourceforge.net/apps/trac/bigdata/ticket/644 (Bigdata2Sesame2BindingSetIterator can fail to notice asynchronous close()) +- http://sourceforge.net/apps/trac/bigdata/ticket/650 (Can not POST RDF to a graph using REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/654 (Rare AssertionError in WriteCache.clearAddrMap()) +- http://sourceforge.net/apps/trac/bigdata/ticket/655 (SPARQL REGEX operator does not perform case-folding correctly for Unicode data) +- http://sourceforge.net/apps/trac/bigdata/ticket/656 (InFactory bug when IN args consist of a single literal) +- http://sourceforge.net/apps/trac/bigdata/ticket/647 (SIDs mode creates unnecessary hash join for GRAPH group patterns) +- http://sourceforge.net/apps/trac/bigdata/ticket/667 (Provide NanoSparqlServer initialization hook) +- http://sourceforge.net/apps/trac/bigdata/ticket/669 (Doubly nested subqueries yield no results with LIMIT) +- http://sourceforge.net/apps/trac/bigdata/ticket/675 (Flush indices in parallel during checkpoint to reduce IO latency) +- http://sourceforge.net/apps/trac/bigdata/ticket/682 (AtomicRowFilter UnsupportedOperationException) + +1.2.2: + +- http://sourceforge.net/apps/trac/bigdata/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/602 (RWStore does not discard logged deletes on reset()) +- http://sourceforge.net/apps/trac/bigdata/ticket/603 (Prepare critical maintenance release as branch of 1.2.1) + +1.2.1: + +- http://sourceforge.net/apps/trac/bigdata/ticket/533 (Review materialization for inline IVs) +- http://sourceforge.net/apps/trac/bigdata/ticket/539 (NotMaterializedException with REGEX and Vocab) +- http://sourceforge.net/apps/trac/bigdata/ticket/540 (SPARQL UPDATE using NSS via index.html) +- http://sourceforge.net/apps/trac/bigdata/ticket/541 (MemoryManaged backed Journal mode) +- http://sourceforge.net/apps/trac/bigdata/ticket/546 (Index cache for Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/549 (BTree can not be cast to Name2Addr (MemStore recycler)) +- http://sourceforge.net/apps/trac/bigdata/ticket/550 (NPE in Leaf.getKey() : root cause was user error) +- http://sourceforge.net/apps/trac/bigdata/ticket/558 (SPARQL INSERT not working in same request after INSERT DATA) +- http://sourceforge.net/apps/trac/bigdata/ticket/562 (Sub-select in INSERT cause NPE in UpdateExprBuilder) +- http://sourceforge.net/apps/trac/bigdata/ticket/563 (DISTINCT ORDER BY) +- http://sourceforge.net/apps/trac/bigdata/ticket/567 (Failure to set cached value on IV results in incorrect behavior for complex UPDATE operation) +- http://sourceforge.net/apps/trac/bigdata/ticket/568 (DELETE WHERE fails with Java AssertionError) +- http://sourceforge.net/apps/trac/bigdata/ticket/569 (LOAD-CREATE-LOAD using virgin journal fails with "Graph exists" exception) +- http://sourceforge.net/apps/trac/bigdata/ticket/571 (DELETE/INSERT WHERE handling of blank nodes) +- http://sourceforge.net/apps/trac/bigdata/ticket/573 (NullPointerException when attempting to INSERT DATA containing a blank node) + +1.2.0: (*) + +- http://sourceforge.net/apps/trac/bigdata/ticket/92 (Monitoring webapp) +- http://sourceforge.net/apps/trac/bigdata/ticket/267 (Support evaluation of 3rd party operators) +- http://sourceforge.net/apps/trac/bigdata/ticket/337 (Compact and efficient movement of binding sets between nodes.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/433 (Cluster leaks threads under read-only index operations: DGC thread leak) +- http://sourceforge.net/apps/trac/bigdata/ticket/437 (Thread-local cache combined with unbounded thread pools causes effective memory leak: termCache memory leak & thread-local buffers) +- http://sourceforge.net/apps/trac/bigdata/ticket/438 (KeyBeforePartitionException on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/439 (Class loader problem) +- http://sourceforge.net/apps/trac/bigdata/ticket/441 (Ganglia integration) +- http://sourceforge.net/apps/trac/bigdata/ticket/443 (Logger for RWStore transaction service and recycler) +- http://sourceforge.net/apps/trac/bigdata/ticket/444 (SPARQL query can fail to notice when IRunningQuery.isDone() on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/445 (RWStore does not track tx release correctly) +- http://sourceforge.net/apps/trac/bigdata/ticket/446 (HTTP Repostory broken with bigdata 1.1.0) +- http://sourceforge.net/apps/trac/bigdata/ticket/448 (SPARQL 1.1 UPDATE) +- http://sourceforge.net/apps/trac/bigdata/ticket/449 (SPARQL 1.1 Federation extension) +- http://sourceforge.net/apps/trac/bigdata/ticket/451 (Serialization error in SIDs mode on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/454 (Global Row Store Read on Cluster uses Tx) +- http://sourceforge.net/apps/trac/bigdata/ticket/456 (IExtension implementations do point lookups on lexicon) +- http://sourceforge.net/apps/trac/bigdata/ticket/457 ("No such index" on cluster under concurrent query workload) +- http://sourceforge.net/apps/trac/bigdata/ticket/458 (Java level deadlock in DS) +- http://sourceforge.net/apps/trac/bigdata/ticket/460 (Uncaught interrupt resolving RDF terms) +- http://sourceforge.net/apps/trac/bigdata/ticket/461 (KeyAfterPartitionException / KeyBeforePartitionException on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/463 (NoSuchVocabularyItem with LUBMVocabulary for DerivedNumericsExtension) +- http://sourceforge.net/apps/trac/bigdata/ticket/464 (Query statistics do not update correctly on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/465 (Too many GRS reads on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/469 (Sail does not flush assertion buffers before query) +- http://sourceforge.net/apps/trac/bigdata/ticket/472 (acceptTaskService pool size on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/475 (Optimize serialization for query messages on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/476 (Test suite for writeCheckpoint() and recycling for BTree/HTree) +- http://sourceforge.net/apps/trac/bigdata/ticket/478 (Cluster does not map input solution(s) across shards) +- http://sourceforge.net/apps/trac/bigdata/ticket/480 (Error releasing deferred frees using 1.0.6 against a 1.0.4 journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/481 (PhysicalAddressResolutionException against 1.0.6) +- http://sourceforge.net/apps/trac/bigdata/ticket/482 (RWStore reset() should be thread-safe for concurrent readers) +- http://sourceforge.net/apps/trac/bigdata/ticket/484 (Java API for NanoSparqlServer REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/491 (AbstractTripleStore.destroy() does not clear the locator cache) +- http://sourceforge.net/apps/trac/bigdata/ticket/492 (Empty chunk in ThickChunkMessage (cluster)) +- http://sourceforge.net/apps/trac/bigdata/ticket/493 (Virtual Graphs) +- 
http://sourceforge.net/apps/trac/bigdata/ticket/496 (Sesame 2.6.3) +- http://sourceforge.net/apps/trac/bigdata/ticket/497 (Implement STRBEFORE, STRAFTER, and REPLACE) +- http://sourceforge.net/apps/trac/bigdata/ticket/498 (Bring bigdata RDF/XML parser up to openrdf 2.6.3.) +- http://sourceforge.net/apps/trac/bigdata/ticket/500 (SPARQL 1.1 Service Description) +- http://www.openrdf.org/issues/browse/SES-884 (Aggregation with an solution set as input should produce an empty solution as output) +- http://www.openrdf.org/issues/browse/SES-862 (Incorrect error handling for SPARQL aggregation; fix in 2.6.1) +- http://www.openrdf.org/issues/browse/SES-873 (Order the same Blank Nodes together in ORDER BY) +- http://sourceforge.net/apps/trac/bigdata/ticket/501 (SPARQL 1.1 BINDINGS are ignored) +- http://sourceforge.net/apps/trac/bigdata/ticket/503 (Bigdata2Sesame2BindingSetIterator throws QueryEvaluationException were it should throw NoSuchElementException) +- http://sourceforge.net/apps/trac/bigdata/ticket/504 (UNION with Empty Group Pattern) +- http://sourceforge.net/apps/trac/bigdata/ticket/505 (Exception when using SPARQL sort & statement identifiers) +- http://sourceforge.net/apps/trac/bigdata/ticket/506 (Load, closure and query performance in 1.1.x versus 1.0.x) +- http://sourceforge.net/apps/trac/bigdata/ticket/508 (LIMIT causes hash join utility to log errors) +- http://sourceforge.net/apps/trac/bigdata/ticket/513 (Expose the LexiconConfiguration to Function BOPs) +- http://sourceforge.net/apps/trac/bigdata/ticket/515 (Query with two "FILTER NOT EXISTS" expressions returns no results) +- http://sourceforge.net/apps/trac/bigdata/ticket/516 (REGEXBOp should cache the Pattern when it is a constant) +- http://sourceforge.net/apps/trac/bigdata/ticket/517 (Java 7 Compiler Compatibility) +- http://sourceforge.net/apps/trac/bigdata/ticket/518 (Review function bop subclass hierarchy, optimize datatype bop, etc.) +- http://sourceforge.net/apps/trac/bigdata/ticket/520 (CONSTRUCT WHERE shortcut) +- http://sourceforge.net/apps/trac/bigdata/ticket/521 (Incremental materialization of Tuple and Graph query results) +- http://sourceforge.net/apps/trac/bigdata/ticket/525 (Modify the IChangeLog interface to support multiple agents) +- http://sourceforge.net/apps/trac/bigdata/ticket/527 (Expose timestamp of LexiconRelation to function bops) +- http://sourceforge.net/apps/trac/bigdata/ticket/532 (ClassCastException during hash join (can not be cast to TermId)) +- http://sourceforge.net/apps/trac/bigdata/ticket/533 (Review materialization for inline IVs) +- http://sourceforge.net/apps/trac/bigdata/ticket/534 (BSBM BI Q5 error using MERGE JOIN) + +1.1.0 (*) + + - http://sourceforge.net/apps/trac/bigdata/ticket/23 (Lexicon joins) + - http://sourceforge.net/apps/trac/bigdata/ticket/109 (Store large literals as "blobs") + - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.) + - http://sourceforge.net/apps/trac/bigdata/ticket/203 (Implement an persistence capable hash table to support analytic query) + - http://sourceforge.net/apps/trac/bigdata/ticket/209 (AccessPath should visit binding sets rather than elements for high level query.) + - http://sourceforge.net/apps/trac/bigdata/ticket/227 (SliceOp appears to be necessary when operator plan should suffice without) + - http://sourceforge.net/apps/trac/bigdata/ticket/232 (Bottom-up evaluation semantics). 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/246 (Derived xsd numeric data types must be inlined as extension types.) + - http://sourceforge.net/apps/trac/bigdata/ticket/254 (Revisit pruning of intermediate variable bindings during query execution) + - http://sourceforge.net/apps/trac/bigdata/ticket/261 (Lift conditions out of subqueries.) + - http://sourceforge.net/apps/trac/bigdata/ticket/300 (Native ORDER BY) + - http://sourceforge.net/apps/trac/bigdata/ticket/324 (Inline predeclared URIs and namespaces in 2-3 bytes) + - http://sourceforge.net/apps/trac/bigdata/ticket/330 (NanoSparqlServer does not locate "html" resources when run from jar) + - http://sourceforge.net/apps/trac/bigdata/ticket/334 (Support inlining of unicode data in the statement indices.) + - http://sourceforge.net/apps/trac/bigdata/ticket/364 (Scalable default graph evaluation) + - http://sourceforge.net/apps/trac/bigdata/ticket/368 (Prune variable bindings during query evaluation) + - http://sourceforge.net/apps/trac/bigdata/ticket/370 (Direct translation of openrdf AST to bigdata AST) + - http://sourceforge.net/apps/trac/bigdata/ticket/373 (Fix StrBOp and other IValueExpressions) + - http://sourceforge.net/apps/trac/bigdata/ticket/377 (Optimize OPTIONALs with multiple statement patterns.) + - http://sourceforge.net/apps/trac/bigdata/ticket/380 (Native SPARQL evaluation on cluster) + - http://sourceforge.net/apps/trac/bigdata/ticket/387 (Cluster does not compute closure) + - http://sourceforge.net/apps/trac/bigdata/ticket/395 (HTree hash join performance) + - http://sourceforge.net/apps/trac/bigdata/ticket/401 (inline xsd:unsigned datatypes) + - http://sourceforge.net/apps/trac/bigdata/ticket/408 (xsd:string cast fails for non-numeric data) + - http://sourceforge.net/apps/trac/bigdata/ticket/421 (New query hints model.) + - http://sourceforge.net/apps/trac/bigdata/ticket/431 (Use of read-only tx per query defeats cache on cluster) + +1.0.3 + + - http://sourceforge.net/apps/trac/bigdata/ticket/217 (BTreeCounters does not track bytes released) + - http://sourceforge.net/apps/trac/bigdata/ticket/269 (Refactor performance counters using accessor interface) + - http://sourceforge.net/apps/trac/bigdata/ticket/329 (B+Tree should delete bloom filter when it is disabled.) 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/372 (RWStore does not prune the CommitRecordIndex) + - http://sourceforge.net/apps/trac/bigdata/ticket/375 (Persistent memory leaks (RWStore/DISK)) + - http://sourceforge.net/apps/trac/bigdata/ticket/385 (FastRDFValueCoder2: ArrayIndexOutOfBoundsException) + - http://sourceforge.net/apps/trac/bigdata/ticket/391 (Release age advanced on WORM mode journal) + - http://sourceforge.net/apps/trac/bigdata/ticket/392 (Add a DELETE by access path method to the NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/393 (Add "context-uri" request parameter to specify the default context for INSERT in the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/394 (log4j configuration error message in WAR deployment) + - http://sourceforge.net/apps/trac/bigdata/ticket/399 (Add a fast range count method to the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/422 (Support temp triple store wrapped by a BigdataSail) + - http://sourceforge.net/apps/trac/bigdata/ticket/424 (NQuads support for NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/425 (Bug fix to DEFAULT_RDF_FORMAT for bulk data loader in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/426 (Support either lockfile (procmail) and dotlockfile (liblockfile1) in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/427 (BigdataSail#getReadOnlyConnection() race condition with concurrent commit) + - http://sourceforge.net/apps/trac/bigdata/ticket/435 (Address is 0L) + - http://sourceforge.net/apps/trac/bigdata/ticket/436 (TestMROWTransactions failure in CI) + +1.0.2 + + - http://sourceforge.net/apps/trac/bigdata/ticket/32 (Query time expansion of (foo rdf:type rdfs:Resource) drags in SPORelation for scale-out.) + - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.) + - http://sourceforge.net/apps/trac/bigdata/ticket/356 (Query not terminated by error.) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/361 (IRunningQuery not closed promptly.) + - http://sourceforge.net/apps/trac/bigdata/ticket/371 (DataLoader fails to load resources available from the classpath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/376 (Support for the streaming of bigdata IBindingSets into a sparql query.) + - http://sourceforge.net/apps/trac/bigdata/ticket/378 (ClosedByInterruptException during heavy query mix.) + - http://sourceforge.net/apps/trac/bigdata/ticket/379 (NotSerializableException for SPOAccessPath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/382 (Change dependencies to Apache River 2.2.0) + +1.0.1 (*) + + - http://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema names in the sparse row store). + - http://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should use more bits for scale-out). + - http://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized performance counter collection classes). + - http://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() must return new instance when DummyIV is used). + - http://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits Journal to 2B distinct RDF Values per triple/quad store instance). 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/351 (SPO not Serializable exception in SIDS mode (scale-out)). + - http://sourceforge.net/apps/trac/bigdata/ticket/352 (ClassCastException when querying with binding-values that are not known to the database). + - http://sourceforge.net/apps/trac/bigdata/ticket/353 (UnsupportedOperatorException for some SPARQL queries). + - http://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when comparing with non materialized value). + - http://sourceforge.net/apps/trac/bigdata/ticket/357 (RWStore reports "FixedAllocator returning null address, with freeBits".) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/362 (log4j - slf4j bridge.) + +For more information about bigdata(R), please see the following links: + +[1] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page +[2] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted +[3] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap +[4] http://www.bigdata.com/bigdata/docs/api/ +[5] http://sourceforge.net/projects/bigdata/ +[6] http://www.bigdata.com/blog +[7] http://www.systap.com/bigdata.htm +[8] http://sourceforge.net/projects/bigdata/files/bigdata/ +[9] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration + +About bigdata: + +Bigdata(R) is a horizontally-scaled, general purpose storage and computing fabric for ordered data (B+Trees), designed to operate on either a single server or a cluster of commodity hardware. Bigdata(R) uses dynamically partitioned key-range shards in order to remove any realistic scaling limits - in principle, bigdata(R) may be deployed on 10s, 100s, or even thousands of machines and new capacity may be added incrementally without requiring the full reload of all data. The bigdata(R) RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), and datum level provenance. Modified: branches/BIGDATA_RELEASE_1_2_4/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_2_4/build.properties 2014-03-19 18:21:16 UTC (rev 8001) +++ branches/BIGDATA_RELEASE_1_2_4/build.properties 2014-03-21 01:11:10 UTC (rev 8002) @@ -82,7 +82,7 @@ release.dir=ant-release # The build version (note: 0.82b -> 0.82.0); 0.83.2 is followed by 1.0.0 -build.ver=1.2.4 +build.ver=1.2.5 build.ver.osgi=1.0 # Set true to do a snapshot build. This changes the value of ${version} to This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-03-19 18:21:19
|
Revision: 8001 http://sourceforge.net/p/bigdata/code/8001 Author: mrpersonick Date: 2014-03-19 18:21:16 +0000 (Wed, 19 Mar 2014) Log Message: ----------- added a helper servlet for the workbench Modified Paths: -------------- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java Added Paths: ----------- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2014-03-19 16:44:49 UTC (rev 8000) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2014-03-19 18:21:16 UTC (rev 8001) @@ -57,6 +57,8 @@ private InsertServlet m_insertServlet; private DeleteServlet m_deleteServlet; private UpdateServlet m_updateServlet; + private WorkbenchServlet m_workbenchServlet; + /** * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/584"> * DESCRIBE CACHE </a> @@ -80,12 +82,14 @@ m_updateServlet = new UpdateServlet(); m_deleteServlet = new DeleteServlet(); m_describeServlet = new DescribeCacheServlet(); + m_workbenchServlet = new WorkbenchServlet(); m_queryServlet.init(getServletConfig()); m_insertServlet.init(getServletConfig()); m_updateServlet.init(getServletConfig()); m_deleteServlet.init(getServletConfig()); m_describeServlet.init(getServletConfig()); + m_workbenchServlet.init(getServletConfig()); } @@ -120,6 +124,11 @@ m_describeServlet = null; } + if (m_workbenchServlet != null) { + m_workbenchServlet.destroy(); + m_workbenchServlet = null; + } + super.destroy(); } @@ -222,7 +231,11 @@ buildResponse(resp, HTTP_OK, MIME_TEXT_PLAIN); - } else if(req.getParameter("uri") != null) { + } else if (req.getParameter(WorkbenchServlet.ATTR_WORKBENCH) != null) { + + m_workbenchServlet.doPost(req, resp); + + } else if (req.getParameter("uri") != null) { // INSERT via w/ URIs m_insertServlet.doPost(req, resp); Added: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java (rev 0) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java 2014-03-19 18:21:16 UTC (rev 8001) @@ -0,0 +1,183 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sail.webapp; + +import java.io.IOException; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.log4j.Logger; +import org.openrdf.model.Graph; +import org.openrdf.model.impl.GraphImpl; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFParser; +import org.openrdf.rio.RDFParserFactory; +import org.openrdf.rio.RDFParserRegistry; +import org.openrdf.rio.helpers.StatementCollector; + +import com.bigdata.rdf.sail.webapp.client.MiniMime; +import com.bigdata.rdf.store.AbstractTripleStore; + +/** + * Helper servlet for workbench requests. + */ +public class WorkbenchServlet extends BigdataRDFServlet { + + /** + * + */ + private static final long serialVersionUID = 1L; + + static private final transient Logger log = Logger.getLogger(WorkbenchServlet.class); + + /** + * Flag to signify a workbench operation. + */ + static final transient String ATTR_WORKBENCH = "workbench"; + + /** + * Flag to signify a convert operation. POST an RDF document with a + * content type and an accept header for what it should be converted to. + */ + static final transient String ATTR_CONVERT = "convert"; + + + public WorkbenchServlet() { + + } + + @Override + protected void doPost(final HttpServletRequest req, + final HttpServletResponse resp) throws IOException { + + + if (req.getParameter(ATTR_CONVERT) != null) { + + // Convert from one format to another + doConvert(req, resp); + + } + + } + + /** + * Convert RDF data from one format to another. + */ + private void doConvert(final HttpServletRequest req, + final HttpServletResponse resp) throws IOException { + + final String baseURI = req.getRequestURL().toString(); + + final String namespace = getNamespace(req); + + final long timestamp = getTimestamp(req); + + final AbstractTripleStore tripleStore = getBigdataRDFContext() + .getTripleStore(namespace, timestamp); + + if (tripleStore == null) { + /* + * There is no such triple/quad store instance. + */ + buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); + return; + } + + final String contentType = req.getContentType(); + + if (log.isInfoEnabled()) + log.info("Request body: " + contentType); + + /** + * <a href="https://sourceforge.net/apps/trac/bigdata/ticket/620"> + * UpdateServlet fails to parse MIMEType when doing conneg. </a> + */ + + final RDFFormat requestBodyFormat = RDFFormat.forMIMEType(new MiniMime( + contentType).getMimeType()); + + if (requestBodyFormat == null) { + + buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, + "Content-Type not recognized as RDF: " + contentType); + + return; + + } + + final RDFParserFactory rdfParserFactory = RDFParserRegistry + .getInstance().get(requestBodyFormat); + + if (rdfParserFactory == null) { + + buildResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN, + "Parser factory not found: Content-Type=" + + contentType + ", format=" + requestBodyFormat); + + return; + + } + +// final String s= IOUtil.readString(req.getInputStream()); +// System.err.println(s); + + final Graph g = new GraphImpl(); + + try { + + /* + * There is a request body, so let's try and parse it. 
+ */ + + final RDFParser rdfParser = rdfParserFactory + .getParser(); + + rdfParser.setValueFactory(tripleStore.getValueFactory()); + + rdfParser.setVerifyData(true); + + rdfParser.setStopAtFirstError(true); + + rdfParser + .setDatatypeHandling(RDFParser.DatatypeHandling.IGNORE); + + rdfParser.setRDFHandler(new StatementCollector(g)); + + /* + * Run the parser, which will cause statements to be + * inserted. + */ + rdfParser.parse(req.getInputStream(), baseURI); + + sendGraph(req, resp, g); + + } catch (Throwable t) { + + throw BigdataRDFServlet.launderThrowable(t, resp, null); + + } + + } + +} Property changes on: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
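Stripped of the servlet plumbing, the convert operation boils down to: look up an RDFParser for the request's Content-Type, parse the body into an in-memory Graph through a StatementCollector, then serialize that Graph in the negotiated output format (the sendGraph call above). A sketch of the parse half using the same openrdf APIs the commit uses; the file name, MIME type, and base URI in main() are made-up examples.

import java.io.FileInputStream;
import java.io.InputStream;

import org.openrdf.model.Graph;
import org.openrdf.model.impl.GraphImpl;
import org.openrdf.rio.RDFFormat;
import org.openrdf.rio.RDFParser;
import org.openrdf.rio.RDFParserFactory;
import org.openrdf.rio.RDFParserRegistry;
import org.openrdf.rio.helpers.StatementCollector;

// Sketch of the parse step inside WorkbenchServlet.doConvert(): resolve a
// parser from the MIME type and collect the statements into a Graph.
public class ConvertSketch {

    static Graph parse(final InputStream in, final String mimeType,
            final String baseURI) throws Exception {

        final RDFFormat format = RDFFormat.forMIMEType(mimeType);
        if (format == null)
            throw new IllegalArgumentException("Not recognized as RDF: " + mimeType);

        final RDFParserFactory factory = RDFParserRegistry.getInstance().get(format);
        final RDFParser parser = factory.getParser();

        final Graph g = new GraphImpl();
        parser.setRDFHandler(new StatementCollector(g)); // accumulate statements
        parser.parse(in, baseURI);
        return g;
    }

    public static void main(final String[] args) throws Exception {
        // Hypothetical input file; any RDF/XML document would do.
        try (InputStream in = new FileInputStream("data.rdf")) {
            final Graph g = parse(in, "application/rdf+xml", "http://example.org/");
            System.out.println("parsed " + g.size() + " statements");
        }
    }
}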
From: <mrp...@us...> - 2014-03-19 16:44:54
|
Revision: 8000 http://sourceforge.net/p/bigdata/code/8000 Author: mrpersonick Date: 2014-03-19 16:44:49 +0000 (Wed, 19 Mar 2014) Log Message: ----------- fixed the test case to not fail because of the new SPARQL JSON to RDF parser Modified Paths: -------------- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java Modified: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java 2014-03-19 16:00:05 UTC (rev 7999) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java 2014-03-19 16:44:49 UTC (rev 8000) @@ -33,6 +33,8 @@ import org.openrdf.query.resultio.TupleQueryResultFormat; import org.openrdf.rio.RDFFormat; +import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONParserFactory; + /** * Test suite for content negotiation helper class. * @@ -41,6 +43,13 @@ */ public class TestConneg extends TestCase2 { + +// static { +// +// new BigdataSPARQLResultsJSONParserFactory(); +// +// } + /** * */ @@ -68,7 +77,11 @@ assertEquals(format.getName(), format, util.getRDFFormat()); - assertNull(format.getName(), util.getTupleQueryResultFormat()); + if (!format.getName().equals("JSON")) { + + assertNull(format.getName(), util.getTupleQueryResultFormat()); + + } assertSameArray(new ConnegScore[] {// new ConnegScore(1f, format) },// @@ -90,8 +103,12 @@ final ConnegUtil util = new ConnegUtil(format.getDefaultMIMEType()); - assertNull(format.getName(), util.getRDFFormat()); + if (!format.getName().equals("SPARQL/JSON")) { + + assertNull(format.getName(), util.getRDFFormat()); + } + assertEquals(format.getName(), format, util.getTupleQueryResultFormat()); @@ -152,7 +169,11 @@ final ConnegUtil util = new ConnegUtil(format.getDefaultMIMEType()); - assertNull(format.getName(), util.getRDFFormat()); + if (!format.getName().equals("SPARQL/JSON")) { + + assertNull(format.getName(), util.getRDFFormat()); + + } assertEquals(format.getName(), format, util.getBooleanQueryResultFormat()); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-03-19 16:00:11
|
Revision: 7999
http://sourceforge.net/p/bigdata/code/7999
Author:   mrpersonick
Date:     2014-03-19 16:00:05 +0000 (Wed, 19 Mar 2014)

Log Message:
-----------
changed Lexicon's SID materialization strategy

Modified Paths:
--------------
    branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java

Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java
===================================================================
--- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java	2014-03-19 15:13:12 UTC (rev 7998)
+++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java	2014-03-19 16:00:05 UTC (rev 7999)
@@ -2488,6 +2488,15 @@
             }
 
         }
+
+        /*
+         * Add the SID terms to the IVs to materialize.
+         */
+        for (IV<?, ?> iv : unrequestedSidTerms) {
+
+            ivs.add(iv);
+
+        }
 
         /*
          * Filter out the inline values first and those that have already
@@ -2700,7 +2709,7 @@
 
                 if (!ivs.contains(iv)) {
 
-                    ivs.add(iv);
+//                    ivs.add(iv);
 
                     unrequested.add(iv);
 
From: <mrp...@us...> - 2014-03-19 15:13:18
|
Revision: 7998 http://sourceforge.net/p/bigdata/code/7998 Author: mrpersonick Date: 2014-03-19 15:13:12 +0000 (Wed, 19 Mar 2014) Log Message: ----------- changed Lexicon's SID materialization strategy Modified Paths: -------------- branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/ChunkedMaterializationOp.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIteratorImpl.java Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/ChunkedMaterializationOp.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/ChunkedMaterializationOp.java 2014-03-19 14:32:40 UTC (rev 7997) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/ChunkedMaterializationOp.java 2014-03-19 15:13:12 UTC (rev 7998) @@ -398,56 +398,56 @@ } - /** - * Either add the IV to the list if it needs materialization, or else - * delegate to {@link #handleSid(SidIV, Collection, boolean)} if it's a - * SidIV. - */ - static private void handleIV(final IV<?, ?> iv, - final Collection<IV<?, ?>> ids, - final boolean materializeInlineIVs) { - - if (iv instanceof SidIV) { - - handleSid((SidIV<?>) iv, ids, materializeInlineIVs); - - } else if (iv.needsMaterialization() || materializeInlineIVs) { - - ids.add(iv); - - } - - } - - /** - * Sids need to be handled specially because their individual ISPO - * components might need materialization. - */ - static private void handleSid(final SidIV<?> sid, - final Collection<IV<?, ?>> ids, - final boolean materializeInlineIVs) { - - final ISPO spo = sid.getInlineValue(); - - System.err.println("handling a sid"); - System.err.println("adding s: " + spo.s()); - System.err.println("adding p: " + spo.p()); - System.err.println("adding o: " + spo.o()); - - handleIV(spo.s(), ids, materializeInlineIVs); - - handleIV(spo.p(), ids, materializeInlineIVs); - - handleIV(spo.o(), ids, materializeInlineIVs); - - if (spo.c() != null) { - - handleIV(spo.c(), ids, materializeInlineIVs); - - } +// /** +// * Either add the IV to the list if it needs materialization, or else +// * delegate to {@link #handleSid(SidIV, Collection, boolean)} if it's a +// * SidIV. +// */ +// static private void handleIV(final IV<?, ?> iv, +// final Collection<IV<?, ?>> ids, +// final boolean materializeInlineIVs) { +// +// if (iv instanceof SidIV) { +// +// handleSid((SidIV<?>) iv, ids, materializeInlineIVs); +// +// } else if (iv.needsMaterialization() || materializeInlineIVs) { +// +// ids.add(iv); +// +// } +// +// } +// +// /** +// * Sids need to be handled specially because their individual ISPO +// * components might need materialization. +// */ +// static private void handleSid(final SidIV<?> sid, +// final Collection<IV<?, ?>> ids, +// final boolean materializeInlineIVs) { +// +// final ISPO spo = sid.getInlineValue(); +// +// System.err.println("handling a sid"); +// System.err.println("adding s: " + spo.s()); +// System.err.println("adding p: " + spo.p()); +// System.err.println("adding o: " + spo.o()); +// +// handleIV(spo.s(), ids, materializeInlineIVs); +// +// handleIV(spo.p(), ids, materializeInlineIVs); +// +// handleIV(spo.o(), ids, materializeInlineIVs); +// +// if (spo.c() != null) { +// +// handleIV(spo.c(), ids, materializeInlineIVs); +// +// } +// +// } - } - /** * Resolve the term identifiers in the {@link IBindingSet} using the map * populated when we fetched the current chunk. 
Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2014-03-19 14:32:40 UTC (rev 7997) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2014-03-19 15:13:12 UTC (rev 7998) @@ -36,6 +36,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -104,6 +105,7 @@ import com.bigdata.rdf.model.BigdataValueSerializer; import com.bigdata.rdf.rio.StatementBuffer; import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.vocab.NoVocabulary; import com.bigdata.rdf.vocab.Vocabulary; @@ -2471,7 +2473,23 @@ // BlobIVs which must be resolved against an index. final Collection<BlobIV<?>> blobIVs = new LinkedList<BlobIV<?>>(); + final Set<IV<?, ?>> unrequestedSidTerms = new LinkedHashSet<IV<?, ?>>(); + /* + * We need to materialize terms inside of SIDs so that the SIDs + * can be materialized properly. + */ + for (IV<?,?> iv : ivs) { + + if (iv instanceof SidIV) { + + handleSid((SidIV) iv, ivs, unrequestedSidTerms); + + } + + } + + /* * Filter out the inline values first and those that have already * been materialized and cached. */ @@ -2602,8 +2620,7 @@ } /* - * Defer SidIVs until the end so that their ISPO components can be - * materialized first. + * SidIVs require special handling. */ for (IV<?,?> iv : ivs) { @@ -2613,10 +2630,23 @@ // translate it into a value directly ret.put(iv, iv.asValue(this)); + } } + /* + * Remove any IVs that were not explicitly requested in the method + * call but that got pulled into materialization because of a SID. + */ + for (IV<?,?> iv : unrequestedSidTerms) { + + ivs.remove(iv); + + ret.remove(iv); + + } + final long elapsed = System.currentTimeMillis() - begin; if (log.isInfoEnabled()) @@ -2629,6 +2659,58 @@ } /** + * Add the terms inside a SID to the collection of IVs to materialize if + * they are not already there. + */ + @SuppressWarnings("rawtypes") + final private void handleSid(final SidIV sid, + final Collection<IV<?, ?>> ivs, + final Set<IV<?, ?>> unrequested) { + + final ISPO spo = sid.getInlineValue(); + + handleTerm(spo.s(), ivs, unrequested); + + handleTerm(spo.p(), ivs, unrequested); + + handleTerm(spo.o(), ivs, unrequested); + + if (spo.c() != null) { + + handleTerm(spo.c(), ivs, unrequested); + + } + + } + + /** + * Add the terms inside a SID to the collection of IVs to materialize if + * they are not already there. + */ + @SuppressWarnings("rawtypes") + final private void handleTerm(final IV<?, ?> iv, + final Collection<IV<?, ?>> ivs, + final Set<IV<?, ?>> unrequested) { + + if (iv instanceof SidIV) { + + handleSid((SidIV) iv, ivs, unrequested); + + } else { + + if (!ivs.contains(iv)) { + + ivs.add(iv); + + unrequested.add(iv); + + } + + } + + } + + /** * We need to cache the BigdataValues on the IV components within the * SidIV so that the SidIV can materialize itself into a BigdataBNode * properly. 
Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIteratorImpl.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIteratorImpl.java 2014-03-19 14:32:40 UTC (rev 7997) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIteratorImpl.java 2014-03-19 15:13:12 UTC (rev 7998) @@ -243,12 +243,12 @@ private void handleIV(final IV<?, ?> iv, final Collection<IV<?, ?>> ids) { - if (iv instanceof SidIV) { +// if (iv instanceof SidIV) { +// +// handleSid((SidIV<?>) iv, ids); +// +// } - handleSid((SidIV<?>) iv, ids); - - } - if (bnodes == null || !bnodes.containsKey(iv)) { ids.add(iv); @@ -257,30 +257,30 @@ } - /** - * Sids need to be handled specially because their individual ISPO - * components might need materialization as well. - */ - private void handleSid(final SidIV<?> sid, - final Collection<IV<?, ?>> ids) { - - final ISPO spo = sid.getInlineValue(); - - handleIV(spo.s(), ids); - - handleIV(spo.p(), ids); - - handleIV(spo.o(), ids); - - if (spo.c() != null) { - - handleIV(spo.c(), ids); - - } +// /** +// * Sids need to be handled specially because their individual ISPO +// * components might need materialization as well. +// */ +// private void handleSid(final SidIV<?> sid, +// final Collection<IV<?, ?>> ids) { +// +// final ISPO spo = sid.getInlineValue(); +// +// handleIV(spo.s(), ids); +// +// handleIV(spo.p(), ids); +// +// handleIV(spo.o(), ids); +// +// if (spo.c() != null) { +// +// handleIV(spo.c(), ids); +// +// } +// +// } - } - /** * Resolve a term identifier to the {@link BigdataValue}, checking the This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
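A note on the LexiconRelation change above: the new handleSid()/handleTerm() pair walks each SidIV's inline statement and pulls its s/p/o (and optional c) components into the set of IVs to materialize, remembering which components were not explicitly requested so they can be pruned from the result map afterwards. The sketch below shows that collection pattern in isolation, assuming simplified stand-in types (Term, SimpleIV, SimpleSid) in place of the bigdata IV/SidIV/ISPO classes; it is illustrative only, not the project code.

    import java.util.Collection;
    import java.util.LinkedHashSet;
    import java.util.Set;

    // Stand-ins for the bigdata IV / SidIV / ISPO types (assumed, simplified).
    interface Term { }

    class SimpleIV implements Term {
        final String label;
        SimpleIV(final String label) { this.label = label; }
        public String toString() { return label; }
    }

    class SimpleSid implements Term {
        final Term s, p, o, c; // c may be null
        SimpleSid(final Term s, final Term p, final Term o, final Term c) {
            this.s = s; this.p = p; this.o = o; this.c = c;
        }
        public String toString() { return "<< " + s + " " + p + " " + o + " >>"; }
    }

    public class SidCollectDemo {

        // Recursively add a SID's components to the work list, tracking terms
        // that are only there because a SID referenced them.
        static void handleTerm(final Term t, final Collection<Term> ivs,
                final Set<Term> unrequested) {
            if (t instanceof SimpleSid) {
                final SimpleSid sid = (SimpleSid) t;
                handleTerm(sid.s, ivs, unrequested);
                handleTerm(sid.p, ivs, unrequested);
                handleTerm(sid.o, ivs, unrequested);
                if (sid.c != null)
                    handleTerm(sid.c, ivs, unrequested);
            } else if (!ivs.contains(t)) {
                ivs.add(t);
                unrequested.add(t); // not requested by the caller; prune later
            }
        }

        public static void main(final String[] args) {
            final Set<Term> ivs = new LinkedHashSet<Term>();
            final Set<Term> unrequested = new LinkedHashSet<Term>();
            final Term s = new SimpleIV("s"), p = new SimpleIV("p"), o = new SimpleIV("o");
            ivs.add(new SimpleSid(s, p, o, null)); // only the SID itself was requested
            for (final Term t : ivs.toArray(new Term[0]))
                if (t instanceof SimpleSid)
                    handleTerm(t, ivs, unrequested);
            System.out.println("materialize: " + ivs);
            System.out.println("prune afterwards: " + unrequested);
        }
    }

As in the commit, the terms dragged in by a SID are materialized so the SID can render itself, then removed again so the caller only sees the IVs it actually asked for.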
From: <mrp...@us...> - 2014-03-19 14:32:45
Revision: 7997 http://sourceforge.net/p/bigdata/code/7997 Author: mrpersonick Date: 2014-03-19 14:32:40 +0000 (Wed, 19 Mar 2014) Log Message: ----------- removed the commas from the SID representation Modified Paths: -------------- branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-03-18 18:59:02 UTC (rev 7996) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-03-19 14:32:40 UTC (rev 7997) @@ -867,7 +867,7 @@ /* Utility functions */ function getSID(binding) { - return '<<\n <' + binding.value['s'].value + '>,\n<' + binding.value['p'].value + '>,\n <' + binding.value['o'].value + '>\n>>'; + return '<<\n <' + binding.value['s'].value + '>\n<' + binding.value['p'].value + '>\n <' + binding.value['o'].value + '>\n>>'; } function parseSID(sid) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
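The change above just drops the commas from the SID string the workbench displays, so a statement identifier now renders as << <s> <p> <o> >> split across lines. A trivial Java equivalent of that display helper is sketched below; the method name and example URIs are invented for illustration.

    public class SidFormatDemo {

        // Mirrors the workbench's getSID() output format after this change:
        // terms on separate lines, no commas between them.
        static String formatSid(final String s, final String p, final String o) {
            return "<<\n <" + s + ">\n<" + p + ">\n <" + o + ">\n>>";
        }

        public static void main(final String[] args) {
            System.out.println(formatSid("http://example.org/s",
                    "http://example.org/p", "http://example.org/o"));
        }
    }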
From: <mrp...@us...> - 2014-03-18 18:59:06
Revision: 7996 http://sourceforge.net/p/bigdata/code/7996 Author: mrpersonick Date: 2014-03-18 18:59:02 +0000 (Tue, 18 Mar 2014) Log Message: ----------- added support for a JSON to RDF parser. ticket 862 Modified Paths: -------------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterFactoryForConstruct.java branches/RDR/bigdata-rdf/src/resources/service-providers/META-INF/services/org.openrdf.rio.RDFParserFactory branches/RDR/bigdata-war/src/html/js/workbench.js Added Paths: ----------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java branches/RDR/bigdata-sails/lib/jackson-core-2.3.3-20140314.203554-3.jar Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2014-03-18 18:16:29 UTC (rev 7995) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2014-03-18 18:59:02 UTC (rev 7996) @@ -38,6 +38,7 @@ import org.openrdf.rio.RDFWriterRegistry; import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONParserFactory; import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONWriterFactoryForConstruct; import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONWriterFactoryForSelect; import com.bigdata.rdf.rio.ntriples.BigdataNTriplesParserFactory; @@ -118,16 +119,22 @@ r.add(new BigdataNTriplesParserFactory()); - // subclassed the turtle parser to allow for fully numeric bnode ids + // subclassed the turtle parser for RDR r.add(new BigdataTurtleParserFactory()); + /* + * Allows parsing of JSON SPARQL Results with an {s,p,o,[c]} header. + * RDR-enabled. + */ + r.add(new BigdataSPARQLResultsJSONParserFactory()); + } { final TupleQueryResultWriterRegistry r = TupleQueryResultWriterRegistry.getInstance(); - // add our custom RDR-enabled JSON writer + // add our custom RDR-enabled JSON writer (RDR-enabled) r.add(new BigdataSPARQLResultsJSONWriterFactoryForSelect()); } @@ -139,8 +146,10 @@ // r.add(new BigdataRDFXMLWriterFactory()); + // RDR-enabled r.add(new BigdataTurtleWriterFactory()); + // RDR-enabled r.add(new BigdataSPARQLResultsJSONWriterFactoryForConstruct()); } Added: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java (rev 0) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java 2014-03-18 18:59:02 UTC (rev 7996) @@ -0,0 +1,574 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.rio.json; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.LineNumberReader; +import java.io.Reader; + +import org.apache.log4j.Logger; +import org.openrdf.model.Resource; +import org.openrdf.model.Statement; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.model.ValueFactory; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFHandlerException; +import org.openrdf.rio.RDFParseException; +import org.openrdf.rio.helpers.RDFParserBase; + +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.model.BigdataValueFactory; +import com.bigdata.rdf.model.BigdataValueFactoryImpl; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; + +/** + * RDF parser for JSON SPARQL Results files that have the variables (s, p, o, + * and optionally c) in the header. + */ +public class BigdataSPARQLResultsJSONParser extends RDFParserBase { + + protected static final transient Logger log = + Logger.getLogger(BigdataSPARQLResultsJSONParser.class); + + + private LineNumberReader lineReader; + + private BigdataValueFactory vf; + + /** + * Default ctor uses a BigdataValueFactory with a namespace of "". Used + * for testing. + */ + public BigdataSPARQLResultsJSONParser() { + this(BigdataValueFactoryImpl.getInstance("")); + } + + /** + * Construct a parser with the supplied BigdataValueFactory. + */ + public BigdataSPARQLResultsJSONParser(final BigdataValueFactory vf) { + super(vf); + + this.vf = vf; + } + + /** + * Set the value factory. Must be a BigdataValueFactory because of the + * RDR syntax support. + */ + public void setValueFactory(final ValueFactory vf) { + if (vf instanceof BigdataValueFactory) { + this.vf = (BigdataValueFactory) vf; + } else { + throw new IllegalArgumentException(); + } + } + + /** + * Returns {@link BigdataSPARQLResultsJSONParserFactory#JSON}. + */ + @Override + public RDFFormat getRDFFormat() { + + return BigdataSPARQLResultsJSONParserFactory.JSON; + + } + + /** + * Parse the supplied input stream into RDF. + */ + @Override + public void parse(final InputStream is, final String baseURI) throws IOException, + RDFParseException, RDFHandlerException { + + parse(new InputStreamReader(is), baseURI); + + } + + /** + * Parse the supplied reader into RDF. 
+ */ + @Override + public void parse(final Reader r, final String baseURI) throws IOException, + RDFParseException, RDFHandlerException { + + lineReader = new LineNumberReader(r); + // Start counting lines at 1: + lineReader.setLineNumber(1); + + // read graph from JSON in request + + final JsonFactory factory = new JsonFactory(); + + final JsonParser parser = factory.createJsonParser(lineReader); + +// final JsonParser parser = Json.createParser(lineReader); + + JsonToken event = parser.nextToken(); + + if (event != JsonToken.START_OBJECT) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("head"))) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.START_OBJECT) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("vars"))) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.START_ARRAY) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING && !(parser.getCurrentName().equals("s"))) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING && !(parser.getCurrentName().equals("p"))) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING && !(parser.getCurrentName().equals("o"))) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event == JsonToken.VALUE_STRING) { + + if (!(parser.getCurrentName().equals("c"))) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + } + + if (event != JsonToken.END_ARRAY) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.END_OBJECT) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("results"))) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.START_OBJECT) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("bindings"))) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.START_ARRAY) { + reportFatalError("unexpected parse event: " + event); + } + + +// boolean startingBindings = false; +// boolean breakLoop = false; +// +// while (parser.hasNext()) { +// JsonToken event = parser.nextToken(); +// switch (event) { +// case START_ARRAY: +// if (startingBindings) +// breakLoop = true; +// case END_ARRAY: +// case START_OBJECT: +// case END_OBJECT: +// case VALUE_FALSE: +// case VALUE_NULL: +// case VALUE_TRUE: +// System.err.println(event.toString()); +// break; +// case KEY_NAME: +// if (parser.getString().equals("bindings")) +// startingBindings = true; +// System.err.println(event.toString() + " " +// + parser.getString()); +// break; +// case VALUE_STRING: +// case VALUE_NUMBER: +// System.err.println(event.toString() + " " +// + 
parser.getString()); +// break; +// } +// if (breakLoop) +// break; +// } + + rdfHandler.startRDF(); + + Statement stmt; + while ((stmt = parseStatement(parser)) != null) { + + if (log.isDebugEnabled()) + log.debug(stmt); + + rdfHandler.handleStatement(stmt); + + } + + rdfHandler.endRDF(); + + } + + /** + * Parse a statement from the JSON stream. + */ + private final BigdataStatement parseStatement( + final JsonParser parser) + throws RDFParseException, JsonParseException, IOException { + + JsonToken event = parser.nextToken(); + + if (event == null || event == JsonToken.END_ARRAY) { + + return null; + + } + + if (event != JsonToken.START_OBJECT) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("s"))) { + reportFatalError("unexpected parse event: " + event); + } + + final Resource s = (Resource) parseValue(parser); + + event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("p"))) { + reportFatalError("unexpected parse event: " + event); + } + + final URI p = (URI) parseValue(parser); + + event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("o"))) { + reportFatalError("unexpected parse event: " + event); + } + + final Value o = parseValue(parser); + + event = parser.nextToken(); + + switch (event) { + case END_OBJECT: + return vf.createStatement(s, p, o); + case FIELD_NAME: + if (!(parser.getCurrentName().equals("c"))) { + reportFatalError("unexpected parse event: " + event); + } + final Resource c = (Resource) parseValue(parser); + event = parser.nextToken(); + if (event != JsonToken.END_OBJECT) { + reportFatalError("unexpected parse event: " + event); + } + return vf.createStatement(s, p, o, c); + default: + reportFatalError("unexpected parse event: " + event); + } + + // unreachable code + return null; + + } + + /** + * Parse a value from the JSON stream. + */ + protected Value parseValue(final JsonParser parser) + throws RDFParseException, JsonParseException, IOException { + + JsonToken event = parser.nextToken(); + + if (event != JsonToken.START_OBJECT) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("type")) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING) { + reportFatalError("unexpected parse event: " + event); + } + + final String type = parser.getText(); + + Value val = null; + + if ("sid".equals(type)) { + + val = parseSid(parser); + + } else if ("uri".equals(type)) { + + val = parseURI(parser); + + } else if ("bnode".equals(type)) { + + val = parseBNode(parser); + + } else if ("literal".equals(type)) { + + val = parseLiteral(parser); + + } else if ("typed-literal".equals(type)) { + + val = parseTypedLiteral(parser); + + } else { + + reportFatalError("unexpected parse event: " + event); + + } + + event = parser.nextToken(); + + if (event != JsonToken.END_OBJECT) { + reportFatalError("unexpected parse event: " + event); + } + + return val; + + } + + /** + * Parse a sid from the JSON stream. 
+ */ + protected Value parseSid(final JsonParser parser) + throws RDFParseException, JsonParseException, IOException { + + JsonToken event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("value")) { + reportFatalError("unexpected parse event: " + event); + } + + final BigdataStatement stmt = parseStatement(parser); + + return vf.createBNode(stmt); + + } + + /** + * Parse a URI from the JSON stream. + */ + protected Value parseURI(final JsonParser parser) + throws RDFParseException, JsonParseException, IOException { + + JsonToken event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("value")) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING) { + reportFatalError("unexpected parse event: " + event); + } + + return vf.createURI(parser.getText()); + + } + + /** + * Parse a bnode from the JSON stream. + */ + protected Value parseBNode(final JsonParser parser) + throws RDFParseException, JsonParseException, IOException { + + JsonToken event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("value")) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING) { + reportFatalError("unexpected parse event: " + event); + } + + return vf.createBNode(parser.getText()); + + } + + /** + * Parse a plain literal or language-tagged literal from the JSON stream. + */ + protected Value parseLiteral(final JsonParser parser) + throws RDFParseException, JsonParseException, IOException { + + JsonToken event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME) + reportFatalError("unexpected parse event: " + event); + + if (parser.getCurrentName().equals("xml:lang")) { + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING) { + reportFatalError("unexpected parse event: " + event); + } + + final String lang = parser.getText(); + + event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("value")) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING) { + reportFatalError("unexpected parse event: " + event); + } + + return vf.createLiteral(parser.getText(), lang); + + } else if (parser.getCurrentName().equals("value")) { + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING) { + reportFatalError("unexpected parse event: " + event); + } + + return vf.createLiteral(parser.getText()); + + } else { + + reportFatalError("unexpected parse event: " + event); + + // unreachable code + return null; + + } + + } + + /** + * Parse a typed literal from the JSON stream. 
+ */ + protected Value parseTypedLiteral(final JsonParser parser) + throws RDFParseException, JsonParseException, IOException { + + JsonToken event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("datatype")) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING) { + reportFatalError("unexpected parse event: " + event); + } + + final URI datatype = vf.createURI(parser.getText()); + + event = parser.nextToken(); + + if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("value")) { + reportFatalError("unexpected parse event: " + event); + } + + event = parser.nextToken(); + + if (event != JsonToken.VALUE_STRING) { + reportFatalError("unexpected parse event: " + event); + } + + return vf.createLiteral(parser.getText(), datatype); + + } + + /** + * Overrides {@link RDFParserBase#reportFatalError(String)}, adding line + * number information to the error. + */ + @Override + protected void reportFatalError(String msg) throws RDFParseException { + + reportFatalError(msg, lineReader.getLineNumber(), -1); + + } + + +} Property changes on: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java (rev 0) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java 2014-03-18 18:59:02 UTC (rev 7996) @@ -0,0 +1,68 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.rio.json; + +import java.nio.charset.Charset; + +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFParser; +import org.openrdf.rio.RDFParserFactory; +import org.openrdf.rio.turtle.TurtleParser; + +/** + * An {@link RDFParserFactory} for Turtle parsers. + * + * @author Arjohn Kampman + * @openrdf + */ +public class BigdataSPARQLResultsJSONParserFactory implements RDFParserFactory { + + public static final RDFFormat JSON = new RDFFormat( + "JSON", // name + "application/sparql-results+json", // mime-type + Charset.forName("UTF-8"), // charset + "json", // file extension + false, // supports namespaces + true // supports contexts + ); + + static { + + RDFFormat.register(JSON); + + } + + /** + * Returns {@link RDFFormat#TURTLE}. 
+ */ + public RDFFormat getRDFFormat() { + return JSON; + } + + /** + * Returns a new instance of {@link TurtleParser}. + */ + public RDFParser getParser() { + return new BigdataSPARQLResultsJSONParser(); + } +} Property changes on: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java 2014-03-18 18:16:29 UTC (rev 7995) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java 2014-03-18 18:59:02 UTC (rev 7996) @@ -107,15 +107,15 @@ writer.write(", "); writeKey("value"); openBraces(); - writeKeyValue("sid-s", stmt.getSubject()); + writeKeyValue("s", stmt.getSubject()); writeComma(); - writeKeyValue("sid-p", stmt.getPredicate()); + writeKeyValue("p", stmt.getPredicate()); writeComma(); - writeKeyValue("sid-o", stmt.getObject()); + writeKeyValue("o", stmt.getObject()); if (stmt.getContext() != null) { writeComma(); - writeKeyValue("sid-c", stmt.getContext()); + writeKeyValue("c", stmt.getContext()); } closeBraces(); @@ -469,7 +469,7 @@ @Override public RDFFormat getRDFFormat() { - return BigdataSPARQLResultsJSONWriterFactoryForConstruct.JSON; + return BigdataSPARQLResultsJSONParserFactory.JSON; } } Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterFactoryForConstruct.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterFactoryForConstruct.java 2014-03-18 18:16:29 UTC (rev 7995) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterFactoryForConstruct.java 2014-03-18 18:59:02 UTC (rev 7996) @@ -7,7 +7,6 @@ import java.io.OutputStream; import java.io.Writer; -import java.nio.charset.Charset; import org.openrdf.query.resultio.TupleQueryResultFormat; import org.openrdf.query.resultio.TupleQueryResultWriterFactory; @@ -23,26 +22,11 @@ */ public class BigdataSPARQLResultsJSONWriterFactoryForConstruct implements RDFWriterFactory { - public static final RDFFormat JSON = new RDFFormat( - "JSON", // name - "application/sparql-results+json", // mime-type - Charset.forName("UTF-8"), // charset - "json", // file extension - false, // supports namespaces - true // supports contexts - ); - - static { - - RDFFormat.register(JSON); - - } - /** * Returns {@link TupleQueryResultFormat#JSON}. 
*/ public RDFFormat getRDFFormat() { - return JSON; + return BigdataSPARQLResultsJSONParserFactory.JSON; } /** Modified: branches/RDR/bigdata-rdf/src/resources/service-providers/META-INF/services/org.openrdf.rio.RDFParserFactory =================================================================== --- branches/RDR/bigdata-rdf/src/resources/service-providers/META-INF/services/org.openrdf.rio.RDFParserFactory 2014-03-18 18:16:29 UTC (rev 7995) +++ branches/RDR/bigdata-rdf/src/resources/service-providers/META-INF/services/org.openrdf.rio.RDFParserFactory 2014-03-18 18:59:02 UTC (rev 7996) @@ -1,3 +1,4 @@ com.bigdata.rdf.rio.nquads.NQuadsParserFactory com.bigdata.rdf.rio.ntriples.BigdataNTriplesParserFactory com.bigdata.rdf.rio.turtle.BigdataTurtleParserFactory +com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONParserFactory \ No newline at end of file Added: branches/RDR/bigdata-sails/lib/jackson-core-2.3.3-20140314.203554-3.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata-sails/lib/jackson-core-2.3.3-20140314.203554-3.jar =================================================================== --- branches/RDR/bigdata-sails/lib/jackson-core-2.3.3-20140314.203554-3.jar 2014-03-18 18:16:29 UTC (rev 7995) +++ branches/RDR/bigdata-sails/lib/jackson-core-2.3.3-20140314.203554-3.jar 2014-03-18 18:59:02 UTC (rev 7996) Property changes on: branches/RDR/bigdata-sails/lib/jackson-core-2.3.3-20140314.203554-3.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-03-18 18:16:29 UTC (rev 7995) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-03-18 18:59:02 UTC (rev 7996) @@ -867,7 +867,7 @@ /* Utility functions */ function getSID(binding) { - return '<< <' + binding.value['sid-s'].value + '>\n<' + binding.value['sid-p'].value + '>\n<' + binding.value['sid-o'].value + '> >>'; + return '<<\n <' + binding.value['s'].value + '>,\n<' + binding.value['p'].value + '>,\n <' + binding.value['o'].value + '>\n>>'; } function parseSID(sid) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
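For readers not familiar with the streaming style used by BigdataSPARQLResultsJSONParser above: Jackson's JsonParser is advanced one token at a time with nextToken(), and field names and values are read back with getCurrentName()/getText(). The stand-alone sketch below walks a single binding object in that style; it assumes jackson-core 2.x on the classpath, and the JSON snippet is made up for illustration.

    import java.io.IOException;

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.core.JsonToken;

    public class JsonTokenDemo {

        public static void main(final String[] args) throws IOException {
            // One term object, shaped like an entry in a SPARQL results binding.
            final String json = "{\"type\": \"uri\", \"value\": \"http://example.org/a\"}";

            final JsonParser parser = new JsonFactory().createParser(json);

            if (parser.nextToken() != JsonToken.START_OBJECT)
                throw new IOException("expected start of object");

            String type = null, value = null;

            // Walk FIELD_NAME / value pairs until the object closes.
            while (parser.nextToken() == JsonToken.FIELD_NAME) {
                final String field = parser.getCurrentName();
                parser.nextToken(); // advance to the value token
                if ("type".equals(field)) {
                    type = parser.getText();
                } else if ("value".equals(field)) {
                    value = parser.getText();
                }
            }

            System.out.println(type + " -> " + value);
        }
    }

The parser in the commit does the same kind of walk, but with a fixed expectation of the head/vars/results/bindings structure, and each "sid" value recurses back into parseStatement() so RDR statements can nest.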
From: <tob...@us...> - 2014-03-18 18:16:32
Revision: 7995 http://sourceforge.net/p/bigdata/code/7995 Author: tobycraig Date: 2014-03-18 18:16:29 +0000 (Tue, 18 Mar 2014) Log Message: ----------- #843 - Fixed query cancellation in status tab Modified Paths: -------------- branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-03-18 09:41:01 UTC (rev 7994) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-03-18 18:16:29 UTC (rev 7995) @@ -787,25 +787,40 @@ /* Status */ -$('#tab-selector a[data-target=status]').click(function(e) { +$('#tab-selector a[data-target=status]').click(getStatus); + +function getStatus(e) { + if(e) { + e.preventDefault(); + } $.get('/bigdata/status', function(data) { - var accepted = data.match(/Accepted query count=(\d+)/)[1]; - var running = data.match(/Running query count=(\d+)/)[1]; - var numbers = $(data).get(-1).textContent; + // get data inside a jQuery object + data = $('<div>').append(data); + getStatusNumbers(data); + }); +} + +function getStatusNumbers(data) { + var accepted = data.text().match(/Accepted query count=(\d+)/)[1]; + var running = data.text().match(/Running query count=(\d+)/)[1]; + var numbers = $(data).find('pre')[0].textContent; $('#accepted-query-count').html(accepted); $('#running-query-count').html(running); $('#status-numbers').html(numbers); - }); -}); +} $('#show-queries').click(function(e) { e.preventDefault(); $.get('/bigdata/status?showQueries', function(data) { + // get data inside a jQuery object + data = $('<div>').append(data); + + // update status numbers + getStatusNumbers(data); + // clear current list $('#running-queries').empty(); - // get data inside a jQuery object - data = $('<div>').append(data); data.find('h1').each(function(i, e) { // per running query, data is structured h1 form (with numbers/cancel data) h2 pre (with SPARQL) e = $(e); @@ -834,7 +849,7 @@ e.preventDefault(); if(confirm('Cancel query?')) { var id = $(this).data('queryId'); - $.post('/bigdata/?cancel&queryId=' + id); + $.post('/bigdata/status?cancelQuery&queryId=' + id, function() { getStatus(); }); $(this).parents('li').remove(); } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
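The fix above points the workbench at /bigdata/status?cancelQuery&queryId=..., replacing the old /bigdata/?cancel&queryId=... URL. The same endpoint can be hit from outside the browser; a rough Java sketch is below, where the host, port and query id are placeholders and the context path should be checked against the actual deployment.

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class CancelQueryDemo {

        public static void main(final String[] args) throws IOException {
            // Placeholder host/port and query id -- adjust for your deployment.
            final String queryId = "12345";
            final URL url = new URL(
                    "http://localhost:8080/bigdata/status?cancelQuery&queryId=" + queryId);

            final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("POST"); // the workbench issues a POST here
            conn.setDoOutput(true);
            conn.getOutputStream().close(); // empty request body

            System.out.println("HTTP " + conn.getResponseCode());
            conn.disconnect();
        }
    }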
From: <mar...@us...> - 2014-03-18 09:41:04
Revision: 7994 http://sourceforge.net/p/bigdata/code/7994 Author: martyncutcher Date: 2014-03-18 09:41:01 +0000 (Tue, 18 Mar 2014) Log Message: ----------- Fix for #855 to ensure a bad null child addr is not persisted and fix to TestCase to ensure valid data now it is checked! Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java branches/BIGDATA_RELEASE_1_2_4/bigdata/src/test/com/bigdata/btree/data/AbstractNodeDataRecordTestCase.java Modified: branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java 2014-03-18 04:38:07 UTC (rev 7993) +++ branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java 2014-03-18 09:41:01 UTC (rev 7994) @@ -219,9 +219,9 @@ final long childAddr = node.getChildAddr(i); -// if (childAddr == IRawStore.NULL) -// throw new AssertionError("Child is not persistent: index=" + i -// + " out of " + nkeys + " entries, " + node.toString()); + if (childAddr == IRawStore.NULL) + throw new AssertionError("Child is not persistent: index=" + i + + " out of " + nkeys + " entries, " + node.toString()); buf.putLong(childAddr); Modified: branches/BIGDATA_RELEASE_1_2_4/bigdata/src/test/com/bigdata/btree/data/AbstractNodeDataRecordTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_4/bigdata/src/test/com/bigdata/btree/data/AbstractNodeDataRecordTestCase.java 2014-03-18 04:38:07 UTC (rev 7993) +++ branches/BIGDATA_RELEASE_1_2_4/bigdata/src/test/com/bigdata/btree/data/AbstractNodeDataRecordTestCase.java 2014-03-18 09:41:01 UTC (rev 7994) @@ -79,6 +79,9 @@ final long minimumVersionTimestamp = 0L; final long maximumVersionTimestamp = 0L; + // Must not be 0L. See #855. + childAddr[0] = 12L; + final INodeData expected = new MockNodeData(new ReadOnlyKeysRaba(nkeys, keys), spannedTupleCount, childAddr, childEntryCount, hasVersionTimestamps, minimumVersionTimestamp, @@ -104,6 +107,9 @@ final long minimumVersionTimestamp = System.currentTimeMillis(); final long maximumVersionTimestamp = System.currentTimeMillis() + 20; + // Must not be 0L. See #855. + childAddr[0] = 12L; + final INodeData expected = new MockNodeData(new ReadOnlyKeysRaba(nkeys, keys), spannedTupleCount, childAddr, childEntryCount, hasVersionTimestamps, minimumVersionTimestamp, This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
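The substance of the fix above is a fail-fast check while coding a node: if any child address is still the null address (0L, going by the test comment "Must not be 0L"), the coder now throws instead of persisting a record that could never be read back. The sketch below shows that guard pattern on its own, with an invented encode helper rather than the real DefaultNodeCoder:

    import java.nio.ByteBuffer;

    public class ChildAddrGuardDemo {

        // The null storage address; the test change above implies this is 0L.
        static final long NULL = 0L;

        // Fail fast at encode time rather than persisting an unreadable record.
        static ByteBuffer encodeChildAddrs(final long[] childAddr) {
            final ByteBuffer buf = ByteBuffer.allocate(childAddr.length * 8);
            for (int i = 0; i < childAddr.length; i++) {
                if (childAddr[i] == NULL)
                    throw new AssertionError("Child is not persistent: index=" + i);
                buf.putLong(childAddr[i]);
            }
            buf.flip();
            return buf;
        }

        public static void main(final String[] args) {
            System.out.println(encodeChildAddrs(new long[] { 12L, 24L }).remaining()
                    + " bytes encoded");
            try {
                encodeChildAddrs(new long[] { 12L, NULL }); // the #855 failure mode
            } catch (final AssertionError expected) {
                System.out.println("caught: " + expected.getMessage());
            }
        }
    }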
From: <tob...@us...> - 2014-03-18 04:38:11
Revision: 7993 http://sourceforge.net/p/bigdata/code/7993 Author: tobycraig Date: 2014-03-18 04:38:07 +0000 (Tue, 18 Mar 2014) Log Message: ----------- #843 - Initial improvements to status tab Modified Paths: -------------- branches/RDR/bigdata-war/src/html/css/style.css branches/RDR/bigdata-war/src/html/js/workbench.js branches/RDR/bigdata-war/src/html/new.html Modified: branches/RDR/bigdata-war/src/html/css/style.css =================================================================== --- branches/RDR/bigdata-war/src/html/css/style.css 2014-03-17 18:33:10 UTC (rev 7992) +++ branches/RDR/bigdata-war/src/html/css/style.css 2014-03-18 04:38:07 UTC (rev 7993) @@ -191,3 +191,23 @@ pre { font-family: monospace; } + +#running-queries li { + margin: 10px 0; +} + +#running-queries div { + border: 1px solid; + border-bottom: none; + padding: 10px; +} + +#running-queries .query { + max-height: 50px; + overflow: scroll; +} + +#running-queries div.query-details { + border-bottom: 1px solid; +} + Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-03-17 18:33:10 UTC (rev 7992) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-03-18 04:38:07 UTC (rev 7993) @@ -20,12 +20,6 @@ showTab($(this).data('target')); }); -if(window.location.hash) { - showTab(window.location.hash.substr(1)); -} else { - $('#tab-selector a:first').click(); -} - function showTab(tab) { $('.tab').hide(); $('#' + tab + '-tab').show(); @@ -795,10 +789,58 @@ $('#tab-selector a[data-target=status]').click(function(e) { $.get('/bigdata/status', function(data) { - $('#status-tab .box').html(data); + var accepted = data.match(/Accepted query count=(\d+)/)[1]; + var running = data.match(/Running query count=(\d+)/)[1]; + var numbers = $(data).get(-1).textContent; + $('#accepted-query-count').html(accepted); + $('#running-query-count').html(running); + $('#status-numbers').html(numbers); }); }); +$('#show-queries').click(function(e) { + e.preventDefault(); + $.get('/bigdata/status?showQueries', function(data) { + // clear current list + $('#running-queries').empty(); + + // get data inside a jQuery object + data = $('<div>').append(data); + data.find('h1').each(function(i, e) { + // per running query, data is structured h1 form (with numbers/cancel data) h2 pre (with SPARQL) + e = $(e); + // get numbers string, which includes cancel link + var form = e.next(); + var numbers = form.find('p')[0].textContent; + // remove cancel link + numbers = numbers.substring(0, numbers.lastIndexOf(',')); + // get query id + var queryId = form.find('input[type=hidden]').val(); + // get SPARQL + var sparql = form.next().next().html(); + + // got all data, create a li for each query + var li = $('<li><div class="query"><pre>' + sparql + '</pre></div><div class="query-numbers">' + numbers + ', <a href="#" class="cancel-query">Cancel</a></div><div class="query-details"><a href="#" class="query-details collapsed">Details</a></div>'); + li.find('a').data('queryId', queryId); + $('#running-queries').append(li); + }); + + $('.cancel-query').click(cancelQuery); + $('a.query-details').click(getQueryDetails); + }); +}); + +function cancelQuery(e) { + e.preventDefault(); + if(confirm('Cancel query?')) { + var id = $(this).data('queryId'); + $.post('/bigdata/?cancel&queryId=' + id); + $(this).parents('li').remove(); + } +} + +function getQueryDetails(e) {} + /* Performance */ $('#tab-selector 
a[data-target=performance]').click(function(e) { @@ -823,4 +865,10 @@ return $('<div/>').text(text).html(); } +if(window.location.hash) { + $('a[data-target=' + window.location.hash.substring(1) + ']').click(); +} else { + $('#tab-selector a:first').click(); +} + }); Modified: branches/RDR/bigdata-war/src/html/new.html =================================================================== --- branches/RDR/bigdata-war/src/html/new.html 2014-03-17 18:33:10 UTC (rev 7992) +++ branches/RDR/bigdata-war/src/html/new.html 2014-03-18 04:38:07 UTC (rev 7993) @@ -154,8 +154,14 @@ <div class="tab" id="status-tab"> - <div class="box"></div> - + <div class="box"> + <p>Accepted query count: <span id="accepted-query-count"></span></p> + <p>Running query count: <span id="running-query-count"></span></p> + <p>Show <a href="#" id="show-queries">queries</a>, <a href="#" id="show-query-details">query details</a>.</p> + <pre id="status-numbers"></pre> + <ul id="running-queries"></ul> + </div> + </div> <div class="tab" id="performance-tab"> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
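The reworked status tab above scrapes the accepted and running query counts out of the HTML returned by /bigdata/status with two regular expressions. The identical extraction can be done in Java, for example from a monitoring script; the sample input below is made up, but the patterns are the same ones the workbench uses.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class StatusCountDemo {

        public static void main(final String[] args) {
            // Made-up fragment of the /bigdata/status page.
            final String page = "Accepted query count=42\nRunning query count=3\n";

            final Matcher accepted =
                    Pattern.compile("Accepted query count=(\\d+)").matcher(page);
            final Matcher running =
                    Pattern.compile("Running query count=(\\d+)").matcher(page);

            if (accepted.find() && running.find()) {
                System.out.println("accepted=" + accepted.group(1)
                        + ", running=" + running.group(1));
            }
        }
    }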
From: <tob...@us...> - 2014-03-17 18:33:15
Revision: 7992 http://sourceforge.net/p/bigdata/code/7992 Author: tobycraig Date: 2014-03-17 18:33:10 +0000 (Mon, 17 Mar 2014) Log Message: ----------- #827 - Split SIDs across lines; restored headings to incoming links etc Modified Paths: -------------- branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-03-17 15:09:50 UTC (rev 7991) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-03-17 18:33:10 UTC (rev 7992) @@ -589,6 +589,7 @@ var text = binding.value; } text = escapeHTML(text); + text = text.replace(/\n/g, '<br>'); if(binding.type == 'typed-literal') { var tdData = ' class="literal" data-datatype="' + binding.datatype + '"'; } else { @@ -633,10 +634,10 @@ loadURI(uri); // if this is a SID, make the components clickable - var re = /<< <([^<>]*)> <([^<>]*)> <([^<>]*)> >>/; + var re = /<< *<([^<>]*)> *<([^<>]*)> *<([^<>]*)> *>>/; var match = uri.match(re); if(match) { - $('#explore-header').html('<h1><< <<a href="#">' + match[1] + '</a> > <<a href="#">' + match[2] + '</a> > <<a href="#">' + match[3] + '</a> > >></h1>'); + $('#explore-header').html('<h1><< <<a href="#">' + match[1] + '</a>><br><<a href="#">' + match[2] + '</a> ><br><<a href="#">' + match[3] + '</a> > >></h1>'); $('#explore-header h1 a').click(function(e) { e.preventDefault(); explore(this.text); @@ -649,7 +650,8 @@ function loadURI(target) { // identify if this is a vertex or a SID - var re = /<< (?:<[^<>]*> ){3}>>/; + target = target.trim().replace(/\n/g, ' '); + var re = /<< *(?:<[^<>]*> *){3} *>>/; var vertex = !target.match(re); var vertexQuery = '\ @@ -730,7 +732,7 @@ } else { var output = col.value; } - output = escapeHTML(output); + output = escapeHTML(output).replace(/\n/g, '<br>'); if(col.type == 'uri' || col.type == 'sid') { output = '<a href="#">' + output + '</a>'; } @@ -764,8 +766,11 @@ var sections = {incoming: 'Incoming Links', outgoing: 'Outgoing Links', attributes: 'Attributes'}; for(var k in sections) { - if($('#explore-' + k + ' table tr').length == 0) { - $('#explore-' + k).html('No ' + sections[k]); + var id = '#explore-' + k; + if($(id + ' table tr').length == 0) { + $(id).html('No ' + sections[k]); + } else { + $(id).prepend('<h1>' + sections[k] + '</h1>'); } } @@ -805,7 +810,7 @@ /* Utility functions */ function getSID(binding) { - return '<< <' + binding.value['sid-s'].value + '> <' + binding.value['sid-p'].value + '> <' + binding.value['sid-o'].value + '> >>'; + return '<< <' + binding.value['sid-s'].value + '>\n<' + binding.value['sid-p'].value + '>\n<' + binding.value['sid-o'].value + '> >>'; } function parseSID(sid) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
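Since the explore view now receives SIDs spread over several lines, the loadURI() change above first collapses newlines and then matches the << <s> <p> <o> >> form with a whitespace-tolerant pattern. A Java rendering of that parse, for anyone consuming the same string format elsewhere, might look like this (the example SID string is invented):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class SidParseDemo {

        public static void main(final String[] args) {
            // Invented SID string in the workbench's multi-line display format.
            final String sid =
                    "<<\n <http://ex.org/s>\n<http://ex.org/p>\n <http://ex.org/o>\n>>";

            // Collapse newlines first, then match, mirroring the workbench change.
            final String oneLine = sid.trim().replaceAll("\\n", " ");
            final Pattern p =
                    Pattern.compile("<< *<([^<>]*)> *<([^<>]*)> *<([^<>]*)> *>>");
            final Matcher m = p.matcher(oneLine);

            if (m.find()) {
                System.out.println("s=" + m.group(1) + " p=" + m.group(2)
                        + " o=" + m.group(3));
            }
        }
    }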
From: <mar...@us...> - 2014-03-17 15:09:54
Revision: 7991 http://sourceforge.net/p/bigdata/code/7991 Author: martyncutcher Date: 2014-03-17 15:09:50 +0000 (Mon, 17 Mar 2014) Log Message: ----------- rollback r7990 to check unit test failures Revision Links: -------------- http://sourceforge.net/p/bigdata/code/7990 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java Modified: branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java 2014-03-17 15:00:40 UTC (rev 7990) +++ branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java 2014-03-17 15:09:50 UTC (rev 7991) @@ -219,11 +219,11 @@ final long childAddr = node.getChildAddr(i); - if (childAddr == IRawStore.NULL) - throw new AssertionError("Child is not persistent: index=" + i - + " out of " + nkeys + " entries, " + node.toString()); +// if (childAddr == IRawStore.NULL) +// throw new AssertionError("Child is not persistent: index=" + i +// + " out of " + nkeys + " entries, " + node.toString()); - buf.putLong(node.getChildAddr(i)); + buf.putLong(childAddr); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2014-03-17 15:00:46
Revision: 7990 http://sourceforge.net/p/bigdata/code/7990 Author: martyncutcher Date: 2014-03-17 15:00:40 +0000 (Mon, 17 Mar 2014) Log Message: ----------- Fix for #855 to ensure a bad null child addr is not persisted Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java Modified: branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java 2014-03-16 23:25:29 UTC (rev 7989) +++ branches/BIGDATA_RELEASE_1_2_4/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java 2014-03-17 15:00:40 UTC (rev 7990) @@ -43,6 +43,7 @@ import com.bigdata.io.AbstractFixedByteArrayBuffer; import com.bigdata.io.DataOutputBuffer; import com.bigdata.rawstore.Bytes; +import com.bigdata.rawstore.IRawStore; /** * Default implementation for immutable {@link INodeData} records. @@ -212,6 +213,16 @@ // final int O_childAddr = buf.pos(); for (int i = 0; i <= nkeys; i++) { + /* + * See #855 (Child identity is not persistent). + */ + + final long childAddr = node.getChildAddr(i); + + if (childAddr == IRawStore.NULL) + throw new AssertionError("Child is not persistent: index=" + i + + " out of " + nkeys + " entries, " + node.toString()); + buf.putLong(node.getChildAddr(i)); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-03-16 23:25:32
Revision: 7989 http://sourceforge.net/p/bigdata/code/7989 Author: thompsonbry Date: 2014-03-16 23:25:29 +0000 (Sun, 16 Mar 2014) Log Message: ----------- Workaround for the ability to compile the code in CI. I still need to figure out the underlying problem since we can not write test suites to the servlet 3.0 API until this is resolved and I do not think it will run against the servlet 2.0 JAR. See #624 (HA load balancer) Modified Paths: -------------- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-03-16 23:09:34 UTC (rev 7988) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-03-16 23:25:29 UTC (rev 7989) @@ -246,7 +246,7 @@ if (timestamp == null) { - return getConfig(req.getServletContext()).timestamp; + return getConfig(getServletContext()).timestamp; } @@ -291,7 +291,7 @@ } // use the default namespace. - return getConfig(req.getServletContext()).namespace; + return getConfig(getServletContext()).namespace; } Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-03-16 23:09:34 UTC (rev 7988) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-03-16 23:25:29 UTC (rev 7989) @@ -228,10 +228,11 @@ * * @throws IOException */ - static boolean isWritable(final HttpServletRequest req, - final HttpServletResponse resp) throws IOException { + static boolean isWritable(final ServletContext servletContext, + final HttpServletRequest req, final HttpServletResponse resp) + throws IOException { - if(getConfig(req.getServletContext()).readOnly) { + if (getConfig(servletContext).readOnly) { buildResponse(resp, HTTP_METHOD_NOT_ALLOWED, MIME_TEXT_PLAIN, "Not writable."); @@ -240,8 +241,7 @@ return false; } - final HAStatusEnum haStatus = getHAStatus(getIndexManager(req - .getServletContext())); + final HAStatusEnum haStatus = getHAStatus(getIndexManager(servletContext)); if (haStatus == null) { // No quorum. return true; @@ -270,11 +270,11 @@ * * @throws IOException */ - static boolean isReadable(final HttpServletRequest req, - final HttpServletResponse resp) throws IOException { + static boolean isReadable(final ServletContext ctx, + final HttpServletRequest req, final HttpServletResponse resp) + throws IOException { - final HAStatusEnum haStatus = getHAStatus(getIndexManager(req - .getServletContext())); + final HAStatusEnum haStatus = getHAStatus(getIndexManager(ctx)); if (haStatus == null) { // No quorum. 
return true; @@ -364,7 +364,8 @@ * * @return The known serviceURIs for this service. */ - static public String[] getServiceURIs(final HttpServletRequest req) { + static public String[] getServiceURIs(final ServletContext servletContext, + final HttpServletRequest req) { // One or more. final List<String> serviceURIs = new LinkedList<String>(); @@ -407,8 +408,8 @@ * where LBS is the prefix of the load balancer servlet. */ { - final String prefix = (String) req.getServletContext() - .getAttribute(ATTRIBUTE_LBS_PREFIX); + final String prefix = (String) servletContext.getAttribute( + ATTRIBUTE_LBS_PREFIX); if (prefix != null) { @@ -421,8 +422,7 @@ // The ContextPath for the webapp. This should be the next thing // in the [uri]. - final String contextPath = req.getServletContext() - .getContextPath(); + final String contextPath = servletContext.getContextPath(); // The index of the end of the ContextPath. final int endContextPath = nextSlash Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-03-16 23:09:34 UTC (rev 7988) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-03-16 23:25:29 UTC (rev 7989) @@ -74,7 +74,7 @@ protected void doDelete(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isWritable(req, resp)) { + if (!isWritable(getServletContext(), req, resp)) { // Service must be writable. return; } @@ -234,7 +234,7 @@ protected void doPost(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isWritable(req, resp)) { + if (!isWritable(getServletContext(), req, resp)) { // Service must be writable. return; } Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-03-16 23:09:34 UTC (rev 7988) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-03-16 23:25:29 UTC (rev 7989) @@ -563,10 +563,10 @@ } private String getLeaderURL(final HttpServletRequest request) { - - final ServletContext servletContext = request.getServletContext(); - final HAJournal journal = (HAJournal) BigdataServlet + final ServletContext servletContext = getServletContext(); + + final HAJournal journal = (HAJournal) BigdataServlet .getIndexManager(servletContext); final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal.getQuorum(); Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-03-16 23:09:34 UTC (rev 7988) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-03-16 23:25:29 UTC (rev 7989) @@ -106,7 +106,7 @@ protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException { - if (!isWritable(req, resp)) { + if (!isWritable(getServletContext(), req, resp)) { // Service must be writable. 
return; } Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-03-16 23:09:34 UTC (rev 7988) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-03-16 23:25:29 UTC (rev 7989) @@ -118,7 +118,7 @@ protected void doPost(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isWritable(req, resp)) { + if (!isWritable(getServletContext(), req, resp)) { // Service must be writable. return; } @@ -146,7 +146,7 @@ protected void doDelete(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isWritable(req, resp)) { + if (!isWritable(getServletContext(), req, resp)) { // Service must be writable. return; } @@ -172,7 +172,7 @@ protected void doPut(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isWritable(req, resp)) { + if (!isWritable(getServletContext(), req, resp)) { // Service must be writable. return; } @@ -582,8 +582,8 @@ final BNode aDataSet = g.getValueFactory().createBNode(); // Figure out the service end point(s). - final String[] serviceURI = getServiceURIs(req); - + final String[] serviceURI = getServiceURIs(getServletContext(), req); + final VoID v = new VoID(g, tripleStore, serviceURI, aDataSet); v.describeDataSet(false/* describeStatistics */, Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-03-16 23:09:34 UTC (rev 7988) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-03-16 23:25:29 UTC (rev 7989) @@ -258,8 +258,9 @@ } // The serviceURIs for this graph. - final String[] serviceURI = BigdataServlet.getServiceURIs(req); - + final String[] serviceURI = BigdataServlet.getServiceURIs( + getServletContext(), req); + /* * TODO Resolve the SD class name and ctor via a configuration property * for extensible descriptions. @@ -297,7 +298,7 @@ private void doUpdate(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isWritable(req, resp)) { + if (!isWritable(getServletContext(), req, resp)) { // Service must be writable. return; } @@ -397,7 +398,7 @@ void doQuery(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isReadable(req, resp)) { + if (!isReadable(getServletContext(), req, resp)) { // HA Quorum in use, but quorum is not met. return; } @@ -1011,7 +1012,7 @@ private void doEstCard(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isReadable(req, resp)) { + if (!isReadable(getServletContext(), req, resp)) { // HA Quorum in use, but quorum is not met. return; } @@ -1108,7 +1109,7 @@ private void doContexts(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isReadable(req, resp)) { + if (!isReadable(getServletContext(), req, resp)) { // HA Quorum in use, but quorum is not met. 
return; } @@ -1174,7 +1175,7 @@ private void doShardReport(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isReadable(req, resp)) { + if (!isReadable(getServletContext(), req, resp)) { // HA Quorum in use, but quorum is not met. return; } Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java 2014-03-16 23:09:34 UTC (rev 7988) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java 2014-03-16 23:25:29 UTC (rev 7989) @@ -72,10 +72,10 @@ } @Override - protected void doPut(HttpServletRequest req, HttpServletResponse resp) - throws IOException { + protected void doPut(final HttpServletRequest req, + final HttpServletResponse resp) throws IOException { - if (!isWritable(req, resp)) { + if (!isWritable(getServletContext(), req, resp)) { // Service must be writable. return; } @@ -348,7 +348,7 @@ protected void doPost(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isWritable(req, resp)) { + if (!isWritable(getServletContext(), req, resp)) { // Service must be writable. return; } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
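The practical effect of this changeset is that the HA gatekeeper checks (isWritable(), isReadable()) and getServiceURIs() become static helpers that take the ServletContext as an explicit argument rather than reading it from the servlet instance, so the same checks can be reached from code that is not itself a BigdataServlet subclass (the HALoadBalancerServlet in r7988 below resolves the index manager the same way). A minimal sketch of the resulting calling pattern, assuming a hypothetical ExampleServlet living in the same package as BigdataServlet:

import java.io.IOException;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class ExampleServlet extends BigdataServlet {

    private static final long serialVersionUID = 1L;

    @Override
    protected void doPost(final HttpServletRequest req,
            final HttpServletResponse resp) throws IOException {

        // The quorum/read-only check now receives the ServletContext explicitly.
        if (!isWritable(getServletContext(), req, resp)) {
            // Service must be writable. The response has already been committed.
            return;
        }

        // Likewise for resolving the service end point(s) for this service.
        final String[] serviceURIs = getServiceURIs(getServletContext(), req);

        // ... handle the request using those end points ...

    }

}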
From: <tho...@us...> - 2014-03-16 23:09:37
Revision: 7988 http://sourceforge.net/p/bigdata/code/7988 Author: thompsonbry Date: 2014-03-16 23:09:34 +0000 (Sun, 16 Mar 2014) Log Message: ----------- Commit of the initial LBS implementation. It is commented out in web.xml. This implementation is just a proof of concept. It needs to be hardened and developed significantly before it can be of any use. See #624 Modified Paths: -------------- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java branches/RDR/bigdata-war/src/WEB-INF/web.xml Added Paths: ----------- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java Modified: branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-03-16 22:59:51 UTC (rev 7987) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-03-16 23:09:34 UTC (rev 7988) @@ -270,6 +270,11 @@ new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), + new NV(com.bigdata.journal.PlatformStatsPlugIn.Options.COLLECT_PLATFORM_STATISTICS,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.REPORT_DELAY,"2000"), // NB: short delay is used to develop the HALBS. + }, bigdata.kb); } Modified: branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-03-16 22:59:51 UTC (rev 7987) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-03-16 23:09:34 UTC (rev 7988) @@ -272,6 +272,11 @@ new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), + new NV(com.bigdata.journal.PlatformStatsPlugIn.Options.COLLECT_PLATFORM_STATISTICS,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.REPORT_DELAY,"2000"), // NB: short delay is used to develop the HALBS. 
+ }, bigdata.kb); } Modified: branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-03-16 22:59:51 UTC (rev 7987) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-03-16 23:09:34 UTC (rev 7988) @@ -269,6 +269,10 @@ new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.REPORT_DELAY,"2000"), // NB: short delay is used to develop the HALBS. + }, bigdata.kb); } Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-03-16 22:59:51 UTC (rev 7987) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-03-16 23:09:34 UTC (rev 7988) @@ -39,7 +39,6 @@ import java.util.Properties; import javax.servlet.Servlet; -import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @@ -80,13 +79,6 @@ static private final transient Logger log = Logger.getLogger(BigdataRDFServlet.class); /** - * The name of the {@link ServletContext} attribute whose value is the - * {@link BigdataRDFContext}. - */ - static public final transient String ATTRIBUTE_RDF_CONTEXT = BigdataRDFContext.class - .getName(); - - /** * The name of the <code>UTF-8</code> character encoding. */ protected static final String UTF8 = "UTF-8"; @@ -123,53 +115,33 @@ } - final protected SparqlEndpointConfig getConfig() { - - return getBigdataRDFContext().getConfig(); +// /** +// * {@inheritDoc} +// * <p> +// * Note: Overridden to support read-only deployments. +// * +// * @see SparqlEndpointConfig#readOnly +// * @see ConfigParams#READ_ONLY +// */ +// @Override +// static boolean isWritable(final HttpServletRequest req, +// final HttpServletResponse resp) throws IOException { +// +// if(getConfig().readOnly) { +// +// buildResponse(resp, HTTP_METHOD_NOT_ALLOWED, MIME_TEXT_PLAIN, +// "Not writable."); +// +// // Not writable. Response has been committed. +// return false; +// +// } +// +// return super.isWritable(req, resp); +// +// } - } - - final protected BigdataRDFContext getBigdataRDFContext() { - - if (m_context == null) { - - m_context = getRequiredServletContextAttribute(ATTRIBUTE_RDF_CONTEXT); - - } - - return m_context; - - } - - private volatile BigdataRDFContext m_context; - /** - * {@inheritDoc} - * <p> - * Note: Overridden to support read-only deployments. - * - * @see SparqlEndpointConfig#readOnly - * @see ConfigParams#READ_ONLY - */ - @Override - protected boolean isWritable(final HttpServletRequest req, - final HttpServletResponse resp) throws IOException { - - if(getConfig().readOnly) { - - buildResponse(resp, HTTP_METHOD_NOT_ALLOWED, MIME_TEXT_PLAIN, - "Not writable."); - - // Not writable. Response has been committed. - return false; - - } - - return super.isWritable(req, resp); - - } - - /** * Write the stack trace onto the output stream. This will show up in the * client's response. This code path should be used iff we have already * begun writing the response. 
Otherwise, an HTTP error status should be @@ -274,7 +246,7 @@ if (timestamp == null) { - return getConfig().timestamp; + return getConfig(req.getServletContext()).timestamp; } @@ -319,7 +291,7 @@ } // use the default namespace. - return getConfig().namespace; + return getConfig(req.getServletContext()).namespace; } Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-03-16 22:59:51 UTC (rev 7987) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-03-16 23:09:34 UTC (rev 7988) @@ -29,6 +29,8 @@ import java.io.InputStreamReader; import java.io.OutputStream; import java.io.Writer; +import java.util.LinkedList; +import java.util.List; import javax.servlet.ServletContext; import javax.servlet.http.HttpServlet; @@ -41,6 +43,7 @@ import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.IIndexManager; import com.bigdata.quorum.AbstractQuorum; +import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet.InitParams; import com.bigdata.rdf.sail.webapp.client.IMimeTypes; /** @@ -58,11 +61,25 @@ /** * The name of the {@link ServletContext} attribute whose value is the + * {@link BigdataRDFContext}. + */ + static public final transient String ATTRIBUTE_RDF_CONTEXT = BigdataRDFContext.class + .getName(); + + /** + * The name of the {@link ServletContext} attribute whose value is the * {@link IIndexManager}. */ /*package*/ static final transient String ATTRIBUTE_INDEX_MANAGER = IIndexManager.class.getName(); + /** + * The {@link ServletContext} attribute whose value is the prefix for the + * {@link HALoadBalancerServlet} iff it is running. + */ + static final String ATTRIBUTE_LBS_PREFIX = HALoadBalancerServlet.class + .getName() + "." + InitParams.PREFIX; + // /** // * The {@link ServletContext} attribute whose value is the // * {@link SparqlCache}. @@ -91,12 +108,19 @@ HTTP_BADREQUEST = HttpServletResponse.SC_BAD_REQUEST, HTTP_METHOD_NOT_ALLOWED = HttpServletResponse.SC_METHOD_NOT_ALLOWED, HTTP_INTERNALERROR = HttpServletResponse.SC_INTERNAL_SERVER_ERROR, - HTTP_NOTIMPLEMENTED = HttpServletResponse.SC_NOT_IMPLEMENTED; + HTTP_NOTIMPLEMENTED = HttpServletResponse.SC_NOT_IMPLEMENTED; - protected <T> T getRequiredServletContextAttribute(final String name) { + static <T> T getRequiredServletContextAttribute( + final ServletContext servletContext, final String name) { + if (servletContext == null) + throw new IllegalArgumentException(); + + if (name == null) + throw new IllegalArgumentException(); + @SuppressWarnings("unchecked") - final T v = (T) getServletContext().getAttribute(name); + final T v = (T) servletContext.getAttribute(name); if (v == null) throw new RuntimeException("Not set: " + name); @@ -105,15 +129,55 @@ } + static final SparqlEndpointConfig getConfig( + final ServletContext servletContext) { + + return getBigdataRDFContext(servletContext).getConfig(); + + } + + protected final BigdataRDFContext getBigdataRDFContext() { + + return getBigdataRDFContext(getServletContext()); + + } + + static final BigdataRDFContext getBigdataRDFContext( + final ServletContext servletContext) { + +// if (m_context == null) { +// +// m_context = + return getRequiredServletContextAttribute(servletContext, + ATTRIBUTE_RDF_CONTEXT); + +// } +// +// return m_context; + + } + +// private volatile BigdataRDFContext m_context; + /** * The backing {@link IIndexManager}. 
*/ - protected IIndexManager getIndexManager() { - - return getRequiredServletContextAttribute(ATTRIBUTE_INDEX_MANAGER); - - } + protected IIndexManager getIndexManager() { + + return getIndexManager(getServletContext()); + + } + /** + * The backing {@link IIndexManager}. + */ + static IIndexManager getIndexManager(final ServletContext servletContext) { + + return getRequiredServletContextAttribute(servletContext, + ATTRIBUTE_INDEX_MANAGER); + + } + // /** // * Return the {@link Quorum} -or- <code>null</code> if the // * {@link IIndexManager} is not participating in an HA {@link Quorum}. @@ -137,10 +201,11 @@ * {@link IIndexManager} is not an {@link AbstractQuorum} or is not HA * enabled. */ - protected HAStatusEnum getHAStatus() { + static public HAStatusEnum getHAStatus(final IIndexManager indexManager) { + + if (indexManager == null) + throw new IllegalArgumentException(); - final IIndexManager indexManager = getIndexManager(); - if (indexManager instanceof AbstractJournal) { // Note: Invocation against local object (NOT RMI). @@ -163,10 +228,20 @@ * * @throws IOException */ - protected boolean isWritable(final HttpServletRequest req, + static boolean isWritable(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - final HAStatusEnum haStatus = getHAStatus(); + if(getConfig(req.getServletContext()).readOnly) { + + buildResponse(resp, HTTP_METHOD_NOT_ALLOWED, MIME_TEXT_PLAIN, + "Not writable."); + + // Not writable. Response has been committed. + return false; + + } + final HAStatusEnum haStatus = getHAStatus(getIndexManager(req + .getServletContext())); if (haStatus == null) { // No quorum. return true; @@ -195,10 +270,11 @@ * * @throws IOException */ - protected boolean isReadable(final HttpServletRequest req, + static boolean isReadable(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - final HAStatusEnum haStatus = getHAStatus(); + final HAStatusEnum haStatus = getHAStatus(getIndexManager(req + .getServletContext())); if (haStatus == null) { // No quorum. return true; @@ -279,7 +355,99 @@ // return getRequiredServletContextAttribute(ATTRIBUTE_SPARQL_CACHE); // // } - + + /** + * Return the serviceURI(s) for this service (one or more). + * + * @param req + * The request. + * + * @return The known serviceURIs for this service. + */ + static public String[] getServiceURIs(final HttpServletRequest req) { + + // One or more. + final List<String> serviceURIs = new LinkedList<String>(); + + /* + * Figure out the service end point. + * + * Note: This is just the requestURL as reported. This makes is + * possible to support virtual hosting and similar http proxy + * patterns since the SPARQL end point is just the URL at which the + * service is responding. + */ + final String uri; + { + + final StringBuffer sb = req.getRequestURL(); + + final int indexOf = sb.indexOf("?"); + + if (indexOf == -1) { + uri = sb.toString(); + } else { + uri = sb.substring(0, indexOf); + } + serviceURIs.add(uri); + + } + + /** + * If the load balancer servlet is registered, then get its effective + * service URI. This will be a load balanced version of the serviceURI + * that we obtained above. We are trying to go from + * + * http://localhost:8080/bigdata/sparql + * + * to + * + * http://localhost:8080/bigdata/LBS/sparql + * + * where LBS is the prefix of the load balancer servlet. + */ + { + final String prefix = (String) req.getServletContext() + .getAttribute(ATTRIBUTE_LBS_PREFIX); + + if (prefix != null) { + + // locate the // in the protocol. 
+ final int doubleSlash = uri.indexOf("//"); + + // skip past that and locate the next / + final int nextSlash = uri + .indexOf('/', doubleSlash + 2/* fromIndex */); + + // The ContextPath for the webapp. This should be the next thing + // in the [uri]. + final String contextPath = req.getServletContext() + .getContextPath(); + + // The index of the end of the ContextPath. + final int endContextPath = nextSlash + + contextPath.length(); + + // everything up to the *start* of the ContextPath + final String baseURL = uri.substring(0/* beginIndex */, + nextSlash/* endIndex */); + + final String s = baseURL // base URL + + prefix // LBS prefix (includes ContextPath) + + (prefix.endsWith("/") ? "" : "/") + + uri.substring(endContextPath + 1) // remainder of requestURL. + ; + + serviceURIs.add(s); + + } + + } + + return serviceURIs.toArray(new String[serviceURIs.size()]); + + } + static public void buildResponse(final HttpServletResponse resp, final int status, final String mimeType) throws IOException { Added: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java (rev 0) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-03-16 23:09:34 UTC (rev 7988) @@ -0,0 +1,1036 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sail.webapp; + +import java.io.IOException; +import java.net.URI; +import java.util.Arrays; +import java.util.Comparator; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; + +import javax.servlet.ServletConfig; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.log4j.Logger; +import org.eclipse.jetty.proxy.ProxyServlet; + +import com.bigdata.ganglia.GangliaService; +import com.bigdata.ganglia.HostReportComparator; +import com.bigdata.ganglia.IHostReport; +import com.bigdata.ha.HAGlue; +import com.bigdata.ha.QuorumService; +import com.bigdata.journal.GangliaPlugIn; +import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.PlatformStatsPlugIn; +import com.bigdata.journal.jini.ha.HAJournal; +import com.bigdata.quorum.Quorum; + +/** + * + The HA Load Balancer servlet provides a transparent proxy for requests + * arriving its configured URL pattern (the "external" interface for the load + * balancer) to the root of the web application. + * <P> + * The use of the load balancer is entirely optional. If the security rules + * permit, then clients MAY make requests directly against a specific service. 
+ * Thus, no specific provision exists to disable the load balancer servlet, but + * you may choose not to deploy it. + * <p> + * When successfully deployed, requests having prefix corresponding to the URL + * pattern for the load balancer (typically, "/bigdata/LBS/*") are automatically + * redirected to a joined service in the met quorum based on the configured load + * balancer policy. + * <p> + * The load balancer policies are "HA aware." They will always redirect update + * requests to the quorum leader. The default polices will load balance read + * requests over the leader and followers in a manner that reflects the CPU, IO + * Wait, and GC Time associated with each service. The PlatformStatsPlugIn and + * GangliaPlugIn MUST be enabled for the default load balancer policy to + * operate. It depends on those plugins to maintain a model of the load on the + * HA replication cluster. The GangliaPlugIn should be run only as a listener if + * you are are running the real gmond process on the host. If you are not + * running gmond, then the GangliaPlugIn should be configured as both a listener + * and a sender. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/624"> HA Load Balancer </a> + * + * TODO Define some interesting load balancer policies. We can start with + * HA aware round robin and an HA aware policy that is load balanced based + * on the ganglia reported metrics model. + * + * All policies must be HA aware - we only want to send requests to + * services that are joined with the met quorum. + * + * TODO If the target service winds up not joined with the met quorum by + * the time we get there, what should it do? Report an error since we are + * already on its internal interface? Will this servlet see that error? If + * it does, should it handle it? + */ +public class HALoadBalancerServlet extends ProxyServlet { + + private static final Logger log = Logger + .getLogger(HALoadBalancerServlet.class); + + /** + * + */ + private static final long serialVersionUID = 1L; + + public interface InitParams { + +// String ENABLED = "enabled"; +// +// String DEFAULT_ENABLED = "false"; + + /** + * The prefix at which the load balancer is deployed (its URL pattern, + * less any wildcard). This is typically + * + * <pre> + * /bigdata/LBS + * </pre> + * + * but the actual value depends on the servlet mappined established in + * <code>web.xml</code>. + */ + String PREFIX = "prefix"; + + /** + * The load balancer policy (optional). This must be an instance of + * {@link IHALoadBalancerPolicy}. + */ + String POLICY = "policy"; + + String DEFAULT_POLICY = DefaultLBSPolicy.class.getName(); + + /** + * A {@link Comparator} that places {@link IHostReport}s into a total + * ordering from the host with the least load to the host with the + * greatest load (optional). 
+ */ + String COMPARATOR = "comparator"; + + String DEFAULT_COMPARATOR = DefaultHostReportComparator.class.getName(); + + } + + public HALoadBalancerServlet() { + super(); + } + + private boolean enabled = false; + private String prefix = null; + private IHALoadBalancerPolicy policy; + private Comparator<IHostReport> comparator; + private GangliaService gangliaService; + private String[] reportOn; + + @SuppressWarnings("unchecked") + @Override + public void init() throws ServletException { + + super.init(); + + final ServletConfig servletConfig = getServletConfig(); + + final ServletContext servletContext = servletConfig.getServletContext(); + + prefix = servletConfig.getInitParameter(InitParams.PREFIX); + + policy = newInstance(servletConfig, IHALoadBalancerPolicy.class, + InitParams.POLICY, InitParams.DEFAULT_POLICY); + + comparator = newInstance(servletConfig, Comparator.class, + InitParams.COMPARATOR, InitParams.DEFAULT_COMPARATOR); + + final IIndexManager indexManager = BigdataServlet + .getIndexManager(servletContext); + + if (!(indexManager instanceof HAJournal)) { + throw new ServletException("Not HA"); + } + + final HAJournal journal = (HAJournal) indexManager; + + if (journal.getPlatformStatisticsCollector() == null) { + throw new ServletException("LBS requires " + + PlatformStatsPlugIn.class.getName()); + } + + gangliaService = (GangliaService) journal.getGangliaService(); + + if (gangliaService == null) { + throw new ServletException("LBS requires " + + GangliaPlugIn.class.getName()); + } + + reportOn = gangliaService.getDefaultHostReportOn(); + + enabled = true; + + servletContext.setAttribute(BigdataServlet.ATTRIBUTE_LBS_PREFIX, + prefix); + + if (log.isInfoEnabled()) + log.info(servletConfig.getServletName() + " @ " + prefix); + + } + + @Override + public void destroy() { + + enabled = false; + + prefix = null; + + policy = null; + + comparator = null; + + reportOn = null; + + gangliaService = null; + + getServletContext().setAttribute(BigdataServlet.ATTRIBUTE_LBS_PREFIX, + null); + + super.destroy(); + + } + + /** + * Create an instance of some type based on the servlet init parameters. + * + * @param servletConfig + * The {@link ServletConfig}. + * @param iface + * The interface that the type must implement. + * @param name + * The name of the servlet init parameter. + * @param def + * The default value for the servlet init parameter. + * + * @return The instance of the configured type. + * + * @throws ServletException + * if anything goes wrong. + */ + @SuppressWarnings("unchecked") + private static <T> T newInstance(final ServletConfig servletConfig, + final Class<? extends T> iface, final String name, final String def) + throws ServletException { + + final T t; + + String s = servletConfig.getInitParameter(name); + + if (s == null || s.trim().length() == 0) { + + s = def; + + } + + final Class<? extends T> cls; + try { + cls = (Class<? 
extends T>) Class.forName(s); + } catch (ClassNotFoundException e) { + throw new ServletException("cls=" + s + "cause=" + e, e); + } + + if (!iface.isAssignableFrom(cls)) + throw new IllegalArgumentException(name + ":: " + s + + " must extend " + iface.getName()); + + try { + t = (T) cls.newInstance(); + } catch (InstantiationException e) { + throw new ServletException(e); + } catch (IllegalAccessException e) { + throw new ServletException(e); + } + + return t; + + } + + @Override + protected void service(final HttpServletRequest request, + final HttpServletResponse response) throws ServletException, + IOException { + + if (!enabled) { + // The LBS is not available. + response.sendError(HttpServletResponse.SC_NOT_FOUND); + } + + final HostScore[] hosts = hostTable.get(); + + if (hosts == null || hosts.length == 0) { + + // Ensure that the host table exists. + updateHostsTable(); + + } + + final HAGlueScore[] services = serviceTable.get(); + + if (services == null || services.length == 0) { + + /* + * Ensure that the service table exists (more correctly, attempt to + * populate it, but we can only do that if the HAQuorumService is + * running.) + */ + + updateServicesTable(); + + } + + /* + * TODO if rewriteURL() returns null, then the base class (ProxyServlet) + * returns SC_FORBIDDEN. It should return something less ominous, like a + * 404. With an explanation. Or a RETRY. + */ + super.service(request, response); + + } + + /** + * Update the per-host scoring table. + * + * @see #hostTable + * + * FIXME This MUST be updated on a periodic basis. We can probably + * query the gangliaService to figure out how often it gets updates, or + * we can do this every 5 seconds or so (the ganglia updates are not + * synchronized across a cluster - they just pour in). + * + * TODO For scalability on clusters with a lot of ganglia chatter, we + * should only keep the data from those hosts that are of interest for + * a given HA replication cluster. The load on other hosts has no + * impact on our decision when load balancing within an HA replication + * cluster. + */ + private void updateHostsTable() { + + /* + * Note: If there is more than one service on the same host, then we + * will have one record per host, not per service. + * + * Note: The actual metrics that are available depend on the OS and on + * whether you are running gmond or having the GangliaPlugIn do its own + * reporting. The policy that ranks the host reports should be robust to + * these variations. + */ + final IHostReport[] hostReport = gangliaService.getHostReport(// + reportOn,// metrics to be reported. + comparator// imposes order on the host reports. + ); + + log.warn("hostReport=" + Arrays.toString(hostReport)); + + final HostScore[] scores = new HostScore[hostReport.length]; + + for (int i = 0; i < hostReport.length; i++) { + + final IHostReport r = hostReport[i]; + + /* + * TODO This is ignoring the metrics for the host and weighting all + * hosts equally. + */ + scores[i++] = new HostScore(r.getHostName(), 1.0, + (double) hostReport.length); + + } + + // sort into ascending order (increasing activity). + Arrays.sort(scores); + + for (int i = 0; i < scores.length; i++) { + + scores[i].rank = i; + + scores[i].drank = ((double) i) / scores.length; + + } + + if (log.isDebugEnabled()) { + + log.debug("The most active index was: " + scores[scores.length - 1]); + + log.debug("The least active index was: " + scores[0]); + + } + + this.hostTable.set(scores); + + } + + /** + * Update the per-service table. 
+ * + * @see #serviceTable + * + * FIXME This MUST be maintained by appropriate watchers such that we + * just consult the as maintained information and act immediately on + * it. We can not afford any latency for RMI or even figuring out which + * the host has the least load. That should all be maintained by a + * scheduled thread and listeners. + */ + private void updateServicesTable() { + + final ServletContext servletContext = getServletContext(); + + final HAJournal journal = (HAJournal) BigdataServlet + .getIndexManager(servletContext); + + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal.getQuorum(); + + /* + * Note: This is the *local* HAGlueService. + * + * This page must be robust to some new failure modes. The ZooKeeper + * client can now be associated with an expired session, River discovery + * can now be disabled, and the HAQuorumService might not be available + * from quorum.getClient(). All of those things can happen if there is a + * zookeeper session expiration that forces us to terminate the + * HAQuorumService. This condition will be cured automatically (unless + * the service is being shutdown), but only limited status information + * can be provided while the HAQuorumService is not running. + */ + final QuorumService<HAGlue> quorumService; + { + QuorumService<HAGlue> t; + try { + t = (QuorumService) quorum.getClient(); + } catch (IllegalStateException ex) { + // Note: Not available (quorum.start() not called). + return; + } + quorumService = t; + } + + final UUID[] joined = quorum.getJoined(); + final HAGlueScore[] serviceScores = new HAGlueScore[joined.length]; + + for (int i = 0; i < joined.length; i++) { + final UUID serviceId = joined[i]; + try { + + /* + * TODO Scan the existing table before doing an RMI to the + * service. We only need to do the RMI for a new service, not + * one in the table. + * + * TODO A services HashMap<UUID,HAGlueScore> would be much more + * efficient than a table. If we use a CHM, then we can do this + * purely asynchronously as the HAGlue services entire the set + * of joined services. + */ + serviceScores[i] = new HAGlueScore(servletContext, serviceId); + + } catch (RuntimeException ex) { + + /* + * Ignore. Might not be an HAGlue instance. + */ + + if (log.isInfoEnabled()) + log.info(ex, ex); + + continue; + + } + + } + + this.serviceTable.set(serviceScores); + + } + + /* + * FIXME Choose among pre-computed and maintained proxy targets based on the + * LBS policy. + */ + private static final String _proxyTo = "http://localhost:8091/bigdata"; + + /** + * The table of pre-scored hosts. + * + * TODO There is an entry for all known hosts, but not all hosts are running + * service that we care about. So we have to run over the table, filtering + * for hosts that have services that we care about. + */ + private final AtomicReference<HostScore[]> hostTable = new AtomicReference<HostScore[]>( + null); + + /** + * This is the table of known services. We can scan the table for a service + * {@link UUID} and then forward a request to the pre-computed requestURL + * associated with that {@link UUID}. If the requestURL is <code>null</code> + * then we do not know how to reach that service and can not proxy the + * request. + */ + private final AtomicReference<HAGlueScore[]> serviceTable = new AtomicReference<HAGlueScore[]>( + null); + + /** + * For update requests, rewrite the requestURL to the service that is the + * quorum leader. For read requests, rewrite the requestURL to the service + * having the least load. 
+ */ + @Override + protected URI rewriteURI(final HttpServletRequest request) + { + final String path = request.getRequestURI(); + if (!path.startsWith(prefix)) + return null; + + final boolean isUpdate = isUpdateRequest(request); + final String proxyTo; + if(isUpdate) { + // Proxy to leader. + proxyTo = getLeaderURL(request); + } else { + // Proxy to any joined service. + proxyTo = getReaderURL(request); + } + if (proxyTo == null) { + // Could not rewrite. + return null; + } + final StringBuilder uri = new StringBuilder(proxyTo); + if (proxyTo.endsWith("/")) + uri.setLength(uri.length() - 1); + final String rest = path.substring(prefix.length()); + if (!rest.startsWith("/")) + uri.append("/"); + uri.append(rest); + final String query = request.getQueryString(); + if (query != null) + uri.append("?").append(query); + final URI rewrittenURI = URI.create(uri.toString()).normalize(); + + if (!validateDestination(rewrittenURI.getHost(), rewrittenURI.getPort())) + return null; + + if (log.isInfoEnabled()) + log.info("rewrote: " + path + " => " + rewrittenURI); + + return rewrittenURI; + } + + /** + * Return <code>true</code> iff this is an UPDATE request that must be + * proxied to the quorum leader. + * + * FIXME How do we identify "UPDATE" requests? DELETE and PUT are update + * requests, but POST is not always an UPDATE. It can also be used for + * QUERY. GET is never an UPDATE request, and that is what this is based on + * right now. + */ + private boolean isUpdateRequest(HttpServletRequest request) { + + return !request.getMethod().equalsIgnoreCase("GET"); + + } + + private String getLeaderURL(final HttpServletRequest request) { + + final ServletContext servletContext = request.getServletContext(); + + final HAJournal journal = (HAJournal) BigdataServlet + .getIndexManager(servletContext); + + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal.getQuorum(); + + final UUID leaderId = quorum.getLeaderId(); + + if (leaderId == null) { + // No quorum, so no leader. Can not proxy the request. + return null; + } + + /* + * Scan the services table to locate the leader and then proxy the + * request to the pre-computed requestURL for the leader. If that + * requestURL is null then we do not know about a leader and can not + * proxy the request at this time. + */ + + final HAGlueScore[] services = serviceTable.get(); + + if (services == null) { + + // No services. Can't proxy. + return null; + + } + + for (HAGlueScore s : services) { + + if (s.serviceUUID.equals(leaderId)) { + + // Found it. Proxy if the serviceURL is defined. + return s.requestURL; + + } + + } + + // Not found. Won't proxy. + return null; + + } + + /** + * Return the requestURL to which we will proxy a read request. + * + * @param request + * The request. + * + * @return The proxyTo URL -or- <code>null</code> if we could not find a + * service to which we could proxy this request. + */ + private String getReaderURL(final HttpServletRequest request) { + + final HostScore[] hostScores = this.hostTable.get(); + + if (hostScores == null) { + // Can't proxy to anything. + return null; + } + + // Choose a host : TODO This is just a round robin over the hosts. + HostScore hostScore = null; + for (int i = 0; i < hostScores.length; i++) { + + final int hostIndex = (i + nextHost) % hostScores.length; + + hostScore = hostScores[hostIndex]; + + if (hostScore == null) + continue; + + nextHost = hostIndex + 1; + + } + + if (hostScore == null) { + + // No hosts. Can't proxy. 
+ return null; + + } + + final HAGlueScore[] services = this.serviceTable.get(); + + if (services == null) { + + // No services. Can't proxy. + return null; + + } + + /* + * Find a service on that host. + * + * TODO If none found, the try other hosts until we have tried each host + * once and then give up by returning null. This will require pushing + * down the service finder into a method that we call from the hosts + * loop. + */ + for(HAGlueScore x : services) { + + if (x.hostname == null) { + // Can't use if no hostname. + continue; + } + + if (x.requestURL == null) { + // Can't use if no requestURL. + continue; + } + + if (!x.hostname.equals(hostScore.hostname)) { + // This service is not on the host we are looking for. + continue; + } + + return x.requestURL; + + } + + // No service found on that host. + return null; + + } + int nextHost = 0; + + /** Place into descending order by load_one. */ + public static class DefaultHostReportComparator extends + HostReportComparator implements Comparator<IHostReport> { + + public DefaultHostReportComparator() { + super("load_one", true/* asc */); + } + + } + + /** + * Stochastically proxy the request to the services based on their load. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + public static class DefaultLBSPolicy implements IHALoadBalancerPolicy { + + @Override + public String proxyTo(HttpServletRequest req) { + // TODO Auto-generated method stub + return null; + } + + } + + /** + * Always proxy the request to the local service even if it is not HA ready + * (this policy defeats the load balancer). + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + public static class NOPLBSPolicy implements IHALoadBalancerPolicy { + + @Override + public String proxyTo(HttpServletRequest req) { + // TODO Auto-generated method stub + return null; + } + + } + + // TODO Define a host report comparator which uses the metrics that we care + // about (CPU, IOWait, GC Time). + +// /** +// * The default out-of-the-box comparator for ordering the hosts based on the +// * metrics that we care about (CPU, IO Wait, GC Time). +// * +// * @author <a href="mailto:tho...@us...">Bryan +// * Thompson</a> +// */ +// public class MyHostReportComparator implements Comparator<IHostReport> { +// +// public MyHostReportComparator() { +// +// } +// +// @Override +// public int compare(final IHostReport o1, final IHostReport o2) { +// +// final int ret = comp(o1, o2); +// +// return -ret; +// +// } +// +// private int comp(final IHostReport o1, final IHostReport o2) { +// +// final IGangliaMetricMessage m1 = o1.getMetrics().get(metricName); +// +// final IGangliaMetricMessage m2 = o2.getMetrics().get(metricName); +// +// if (m1 == null && m2 == null) +// return 0; +// else if (m1 == null) +// return -1; +// else if (m2 == null) +// return -1; +// +// final double d1 = Double.parseDouble(m1.getStringValue()); +// +// final double d2 = Double.parseDouble(m2.getStringValue()); +// +// if (d1 < d2) +// return -1; +// else if (d2 > d1) +// return 1; +// +// /* +// * Order by host name in case of a tie on the metric. This makes the +// * results more stable. (We could also round the metric a little to +// * improve stability. But that can be done in a custom comparator.) +// */ +// +// return o1.getHostName().compareTo(o2.getHostName()); +// +// } +// +// } + + /** + * Helper class caches metadata about an {@link HAGlue} service. + * <p> + * Note: This class is written fairly defensively. 
The fields can wind up + * being left at their default values (typically <code>null</code>) if we + * are not able to find the necessary information for the {@link HAGlue} + * service. Users of this class must test for <code>null</code> values and + * skip over those services since they have not been pre-resolved to a host + * and requestURL. + */ + private static class HAGlueScore { + + final UUID serviceUUID; + HAGlue haGlue; + String hostname; + int port; + /** + * The {@link #requestURL} is assigned IFF everything succeeds. This is + * what we will use to proxy a request to the service having the + * {@link UUID} given to the constuctor. + * + * Note: This needs to be a URL, not just a relative path. At least with + * the rewriteURI() code in the outer class. Otherwise you get an NPE. + */ + String requestURL; + + public HAGlueScore(final ServletContext servletContext, + final UUID serviceUUID) { + + if (servletContext == null) + throw new IllegalArgumentException(); + + if (serviceUUID == null) + throw new IllegalArgumentException(); + + this.serviceUUID = serviceUUID; + + final HAJournal journal = (HAJournal) BigdataServlet + .getIndexManager(servletContext); + + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal + .getQuorum(); + + if (quorum == null) { + // No quorum. + return; + } + + /* + * Note: This is the *local* HAGlueService. + * + * This page must be robust to some new failure modes. The ZooKeeper + * client can now be associated with an expired session, River discovery + * can now be disabled, and the HAQuorumService might not be available + * from quorum.getClient(). All of those things can happen if there is a + * zookeeper session expiration that forces us to terminate the + * HAQuorumService. This condition will be cured automatically (unless + * the service is being shutdown), but only limited status information + * can be provided while the HAQuorumService is not running. + */ + final QuorumService<HAGlue> quorumService; + { + QuorumService<HAGlue> t; + try { + t = (QuorumService) quorum.getClient(); + } catch (IllegalStateException ex) { + // Note: Not available (quorum.start() not called). + return; + } + quorumService = t; + } + + try { + haGlue = quorumService.getService(serviceUUID); + } catch (IllegalArgumentException ex) { + // No such service. + return; + } + + /* + * TODO The hostname and port are RMIs. Use a smart proxy. + */ + try { + hostname = haGlue.getHostname(); + port = haGlue.getNSSPort(); + } catch (IOException ex) { + // RMI error. + return; + } + + final String contextPath = servletContext.getContextPath(); + + requestURL = "http://" + hostname + ":" + port + + contextPath; + + } + + } + + /** + * Helper class assigns a raw and a normalized score to each host based on + * its per-host. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private static class HostScore implements Comparable<HostScore> { + + /** The hostname. */ + private final String hostname; + + /** The raw (write) score computed for that index partition. */ + private final double rawScore; + + /** The normalized score computed for that index partition. */ + private final double score; + + /** The rank in [0:#scored]. This is an index into the Scores[]. */ + private int rank = -1; + + /** The normalized double precision rank in [0.0:1.0]. 
*/ + private double drank = -1d; + + @Override + public String toString() { + + return "Score{hostname=" + hostname + ", rawScore=" + rawScore + + ", score=" + score + ", rank=" + rank + ", drank=" + + drank + "}"; + + } + + public HostScore(final String hostname, final double rawScore, + final double totalRawScore) { + + this.hostname = hostname; + + this.rawScore = rawScore; + + score = normalize(rawScore, totalRawScore); + + } + + /** + * Places elements into order by ascending {@link #rawScore}. The + * {@link #hostname} is used to break any ties. + */ + public int compareTo(final HostScore arg0) { + + if (rawScore < arg0.rawScore) { + + return -1; + + } else if (rawScore > arg0.rawScore) { + + return 1; + + } + + return hostname.compareTo(arg0.hostname); + + } + + } + + /** + * Places {@link HostScore} into ascending order (lowest score to highest + * score). Ties are broken based on an alpha sort of the index name. + */ + static private class ASC implements Comparator<HostScore> { + + public int compare(HostScore arg0, HostScore arg1) { + + if (arg0.rawScore < arg1.rawScore) { + + return -1; + + } else if (arg0.rawScore > arg1.rawScore) { + + return 1; + + } + + return arg0.hostname.compareTo(arg1.hostname); + + } + + } + + /** + * Places {@link HostScore} into descending order (highest score to lowest + * score). Ties are broken based on an alpha sort of the index name. + */ + static private class DESC implements Comparator<HostScore> { + + public int compare(HostScore arg0, HostScore arg1) { + + if (arg1.rawScore < arg0.rawScore) { + + return -1; + + } else if (arg1.rawScore > arg0.rawScore) { + + return 1; + + } + + return arg0.hostname.compareTo(arg1.hostname); + + } + + } + + /** + * Normalizes a raw score in the context of totals for some data + * service. + * + * @param rawScore + * The raw score. + * @param totalRawScore + * The raw score computed from the totals. + * + * @return The normalized score. + */ + static public double normalize(final double rawScore, + final double totalRawScore) { + + if (totalRawScore == 0d) { + + return 0d; + + } + + return rawScore / totalRawScore; + + } + +} Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-03-16 22:59:51 UTC (rev 7987) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-03-16 23:09:34 UTC (rev 7988) @@ -216,6 +216,14 @@ + quorumService.getLogicalServiceZPath()).node("br") .close(); + p.text("PlatformStatsPlugIn=" + + (journal.getPlatformStatisticsCollector() == null ? "N/A" + : "Running")).node("br").close(); + + p.text("GangliaPlugIn=" + + (journal.getGangliaService() == null ? "N/A" + : "Running")).node("br").close(); + // Note: This is the *local* value of getHAStatus(). // Note: The HAReady token reflects whether or not the service // is Added: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java (rev 0) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java 2014-03-16 23:09:34 UTC (rev 7988) @@ -0,0 +1,49 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. 
+ +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sail.webapp; + +import javax.servlet.http.HttpServletRequest; + +/** + * Load balancer policy interface. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see HALoadBalancerServlet + * @see <a href="http://trac.bigdata.com/ticket/624">HA Load Balancer</a> + */ +public interface IHALoadBalancerPolicy { + + /** + * Return the URL to which the request will be proxied. The returned URL + * must include the protocol, hostname and port (if a non-default port will + * be used) as well as the target request path. + * + * @param req + * The request. + * + * @return The URL. + */ + String proxyTo(HttpServletRequest req); + +} Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-03-16 22:59:51 UTC (rev 7987) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-03-16 23:09:34 UTC (rev 7988) @@ -581,15 +581,8 @@ final BNode aDataSet = g.getValueFactory().createBNode(); - /* - * Figure out the service end point. - * - * Note: This is just the requestURL as reported. This makes is - * possible to support virtual hosting and similar http proxy - * patterns since the SPARQL end point is just the URL at which the - * service is responding. - */ - final String serviceURI = req.getRequestURL().toString(); + // Figure out the service end point(s). + final String[] serviceURI = getServiceURIs(req); final VoID v = new VoID(g, tripleStore, serviceURI, aDataSet); Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-03-16 22:59:51 UTC (rev 7987) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-03-16 23:09:34 UTC (rev 7988) @@ -30,6 +30,7 @@ import java.io.StringWriter; import java.io.Writer; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.UUID; @@ -39,6 +40,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import javax.servlet.ServletRegistration; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @@ -255,30 +257,9 @@ return; } - /* - * Figure out the service end point. - * - * Note: This code to figure out the service end point is a hack. 
It - * tends to work for the special case of ServiceDescription because - * there is an identity between the request URL and the service end - * point in this special case. - */ + // The serviceURIs for this graph. + final String[] serviceURI = BigdataServlet.getServiceURIs(req); - final String serviceURI; - { - - final StringBuffer sb = req.getRequestURL(); - - final int indexOf = sb.indexOf("?"); - - if (indexOf == -1) { - serviceURI = sb.toString(); - } else { - serviceURI = sb.substring(0, indexOf); - } - - } - /* * TODO Resolve the SD class name and ctor via a configuration property * for extensible descriptions. Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-03-16 22:59:51 UTC (rev 7987) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-03-16 23:09:34 UTC (rev 7988) @@ -379,7 +379,7 @@ /** * The service end point (from the constructor). */ - protected final String serviceURI; + protected final String[] serviceURI; /** * The value factory used to create values for the service description graph @@ -415,7 +415,7 @@ * @see #describeService() */ public SD(final Graph g, final AbstractTripleStore tripleStore, - final String serviceURI) { + final String... serviceURI) { if (g == null) throw new IllegalArgumentException(); @@ -426,6 +426,13 @@ if (serviceURI == null) throw new IllegalArgumentException(); + if (serviceURI.length == 0) + throw new IllegalArgumentException(); + + for (String s : serviceURI) + if (s == null) + throw new IllegalArgumentException(); + this.g = g; this.tripleStore = tripleStore; @@ -498,8 +505,12 @@ */ protected void describeServiceEndpoints() { - g.add(aService, SD.endpoint, g.getValueFactory().createURI(serviceURI)); + for (String uri : serviceURI) { + g.add(aService, SD.endpoint, g.getValueFactory().createURI(uri)); + + } + } /** Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java 2014-03-16 22:59:51 UTC (rev 7987) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java 2014-03-16 23:09:34 UTC (rev 7988) @@ -70,23 +70,23 @@ * The graph in which the service description is accumulated (from the * constructor). */ - protected final Graph g; + private final Graph g; /** * The KB instance that is being described (from the constructor). */ - protected final AbstractTripleStore tripleStore; + private final AbstractTripleStore tripleStore; /** - * The service end point (from the constructor). + * The service end point(s) (from the constructor). */ - protected final String serviceURI; + private final String[] serviceURI; /** * The value factory used to create value... [truncated message content] |
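The policy contract added in this revision is a single method: IHALoadBalancerPolicy.proxyTo(HttpServletRequest) returns the absolute URL (protocol, hostname, port if non-default, and target request path) to which a request will be proxied. The bundled DefaultLBSPolicy and NOPLBSPolicy are still TODO stubs that return null, and rewriteURI() does not yet consult the policy at all; it resolves the leader or a reader directly. A minimal sketch of what an implementation looks like, assuming a hypothetical FixedTargetLBSPolicy that reuses the hard-coded _proxyTo placeholder from the patch (a real policy would be HA aware and consult the quorum and host scores):

import javax.servlet.http.HttpServletRequest;

public class FixedTargetLBSPolicy implements IHALoadBalancerPolicy {

    // Assumption: a single fixed target, mirroring the _proxyTo placeholder.
    private static final String TARGET = "http://localhost:8091/bigdata";

    @Override
    public String proxyTo(final HttpServletRequest req) {

        // Must include the protocol, hostname, port (if non-default) and the
        // target request path.
        return TARGET;

    }

}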
From: <tho...@us...> - 2014-03-16 22:59:55
Revision: 7987 http://sourceforge.net/p/bigdata/code/7987 Author: thompsonbry Date: 2014-03-16 22:59:51 +0000 (Sun, 16 Mar 2014) Log Message: ----------- Exposed the GagliaService when running in the Journal. Exposed the set of metrics that are being reported by default for the GangliaService. Pretty print of metrics. See #624. Modified Paths: -------------- branches/RDR/bigdata/src/java/com/bigdata/journal/Journal.java branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/HostReport.java Modified: branches/RDR/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/journal/Journal.java 2014-03-16 22:56:48 UTC (rev 7986) +++ branches/RDR/bigdata/src/java/com/bigdata/journal/Journal.java 2014-03-16 22:59:51 UTC (rev 7987) @@ -3716,7 +3716,7 @@ * * @see PlatformStatsPlugIn */ - protected AbstractStatisticsCollector getPlatformStatisticsCollector() { + public AbstractStatisticsCollector getPlatformStatisticsCollector() { final IPlugIn<Journal, AbstractStatisticsCollector> plugin = pluginPlatformStats .get(); @@ -3729,6 +3729,17 @@ return t; } + + public Object getGangliaService() { + + final IPlugIn<Journal, ?> plugin = pluginGanglia.get(); + + if (plugin == null) + return null; + + return plugin.getService(); + + } /** * An executor service used to read on the local disk. Modified: branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java =================================================================== --- branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java 2014-03-16 22:56:48 UTC (rev 7986) +++ branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java 2014-03-16 22:59:51 UTC (rev 7987) @@ -260,6 +260,18 @@ "gexec"// }; + /** + * Return a copy of the default metrics used to generate {@link IHostReport} + * s. + * + * @see #getHostReport() + */ + public String[] getDefaultHostReportOn() { + + return Arrays.copyOf(defaultHostReportOn, defaultHostReportOn.length); + + } + /** Place into descending order by load_one. */ private static final Comparator<IHostReport> defaultHostReportComparator = new HostReportComparator( "load_one", false/* asc */); Modified: branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/HostReport.java =================================================================== --- branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/HostReport.java 2014-03-16 22:56:48 UTC (rev 7986) +++ branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/HostReport.java 2014-03-16 22:59:51 UTC (rev 7987) @@ -25,7 +25,8 @@ private final String hostName; private final Map<String, IGangliaMetricMessage> metrics; - public HostReport(final String hostName, final Map<String,IGangliaMetricMessage> metrics) { + public HostReport(final String hostName, + final Map<String, IGangliaMetricMessage> metrics) { if(hostName == null) throw new IllegalArgumentException(); @@ -52,5 +53,12 @@ return metrics; } - + + @Override + public String toString() { + + return getClass().getName() + "{hostName=" + hostName + ", metrics=" + + metrics + "}"; + + } } \ No newline at end of file This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
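Together with r7988 above, these accessors give the load balancer its view of cluster load without exposing GangliaService internals: the Journal hands back the running GangliaService (or null if the GangliaPlugIn is not enabled), getDefaultHostReportOn() supplies the metric names to report on, and getHostReport() returns one IHostReport per known host, which the new HostReport.toString() makes readable in the logs. A minimal sketch of that consumption pattern, assuming a hypothetical HostReportExample helper and a Journal configured with the GangliaPlugIn:

import java.util.Comparator;

import com.bigdata.ganglia.GangliaService;
import com.bigdata.ganglia.HostReportComparator;
import com.bigdata.ganglia.IHostReport;
import com.bigdata.journal.Journal;

public class HostReportExample {

    public static void printHostReports(final Journal journal) {

        // Newly public accessor: null unless the GangliaPlugIn is running.
        final GangliaService gangliaService = (GangliaService) journal
                .getGangliaService();

        if (gangliaService == null)
            return;

        // A copy of the default per-host metrics used to build host reports.
        final String[] reportOn = gangliaService.getDefaultHostReportOn();

        // Impose an order on the host reports using the load_one metric.
        final Comparator<IHostReport> comparator = new HostReportComparator(
                "load_one", true/* asc */);

        // One report per known host; HostReport#toString() pretty prints it.
        for (IHostReport r : gangliaService.getHostReport(reportOn, comparator))
            System.out.println(r);

    }

}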
From: <tho...@us...> - 2014-03-16 22:56:52
Revision: 7986 http://sourceforge.net/p/bigdata/code/7986 Author: thompsonbry Date: 2014-03-16 22:56:48 +0000 (Sun, 16 Mar 2014) Log Message: ----------- Added dependency on jetty-client to support transparent proxying. The statistics collector now invokes the banner. See #624. Modified Paths: -------------- branches/RDR/.classpath branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java branches/RDR/build.xml branches/RDR/pom.xml Added Paths: ----------- branches/RDR/bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar Modified: branches/RDR/.classpath =================================================================== --- branches/RDR/.classpath 2014-03-16 11:24:12 UTC (rev 7985) +++ branches/RDR/.classpath 2014-03-16 22:56:48 UTC (rev 7986) @@ -34,7 +34,7 @@ <classpathentry exported="true" kind="lib" path="bigdata/lib/dsi-utils-1.0.6-020610.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lgpl-utils-1.0.7-270114.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/apache/zookeeper-3.3.3.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/servlet-api-3.1.0.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/servlet-api-3.1.0.jar" sourcepath="/Users/bryan/Documents/workspace/org.eclipse.jetty.project-jetty-9.1-wsbatch"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/unimi/colt-1.2.0.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/icu/icu4j-4.8.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/icu/icu4j-charset-4.8.jar"/> @@ -58,6 +58,7 @@ <classpathentry exported="true" kind="lib" path="bigdata/lib/unimi/fastutil-5.1.5.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-analyzers-3.0.0.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-core-3.0.0.jar"/> + <classpathentry kind="lib" path="bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar" sourcepath="/Users/bryan/Documents/workspace/org.eclipse.jetty.project-jetty-9.1-wsbatch"/> <classpathentry exported="true" kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/high-scale-lib-v1.1.2.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/junit-ext-1.1-b3-dev.jar"/> @@ -80,7 +81,7 @@ <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-continuation-9.1.3.v20140225.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-http-9.1.3.v20140225.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-io-9.1.3.v20140225.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-proxy-9.1.3.v20140225.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-proxy-9.1.3.v20140225.jar" sourcepath="/Users/bryan/Documents/workspace/org.eclipse.jetty.project-jetty-9.1-wsbatch"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-rewrite-9.1.3.v20140225.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-security-9.1.3.v20140225.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-server-9.1.3.v20140225.jar" sourcepath="/Users/bryan/Documents/workspace/org.eclipse.jetty.project-jetty-9.1-wsbatch"/> Added: branches/RDR/bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar =================================================================== (Binary files differ) Index: 
branches/RDR/bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar 2014-03-16 11:24:12 UTC (rev 7985) +++ branches/RDR/bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar 2014-03-16 22:56:48 UTC (rev 7986) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java 2014-03-16 11:24:12 UTC (rev 7985) +++ branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java 2014-03-16 22:56:48 UTC (rev 7986) @@ -43,6 +43,7 @@ import org.apache.log4j.Logger; import org.apache.system.SystemUtil; +import com.bigdata.Banner; import com.bigdata.LRUNexus; import com.bigdata.counters.httpd.CounterSetHTTPD; import com.bigdata.counters.linux.StatisticsCollectorForLinux; @@ -754,7 +755,7 @@ * if no implementation is available for your operating system. */ public static void main(final String[] args) throws InterruptedException { - + Banner.banner(); final int DEFAULT_COUNT = 10; final int nargs = args.length; final int interval; Modified: branches/RDR/build.xml =================================================================== --- branches/RDR/build.xml 2014-03-16 11:24:12 UTC (rev 7985) +++ branches/RDR/build.xml 2014-03-16 22:56:48 UTC (rev 7986) @@ -996,6 +996,8 @@ tofile="${dist.lib}/jetty-xml.jar" /> <copy file="${bigdata-jetty.lib}/jetty-rewrite-${jetty.version}.jar" tofile="${dist.lib}/jetty-rewrite.jar" /> + <copy file="${bigdata-jetty.lib}/jetty-client-${jetty.version}.jar" + tofile="${dist.lib}/jetty-client.jar" /> <copy file="${bigdata-jetty.lib}/jetty-proxy-${jetty.version}.jar" tofile="${dist.lib}/jetty-proxy.jar" /> <copy file="${bigdata-jetty.lib}/servlet-api-${servlet.version}.jar" @@ -1749,8 +1751,8 @@ <!-- TODO ${path.separator}${dist.lib}/bigdata-gas.jar --> <property name="javac.test.classpath" - 
value="${classes.dir}${path.separator}${junit.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/classserver.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar" /> - + 
value="${classes.dir}${path.separator}${junit.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/classserver.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-client.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar" /> + <echo>javac </echo> <echo> javac.test.classpath="${javac.test.classpath}" @@ -2116,6 +2118,7 @@ <pathelement location="${dist.lib}/jetty-security.jar" /> <pathelement location="${dist.lib}/jetty-xml.jar" /> <pathelement location="${dist.lib}/jetty-rewrite.jar" /> + <pathelement location="${dist.lib}/jetty-client.jar" /> <pathelement location="${dist.lib}/jetty-proxy.jar" /> <pathelement location="${dist.lib}/servlet-api.jar" /> <pathelement location="${dist.lib}/commons-codec.jar" /> @@ -2129,7 +2132,7 @@ </path> <property name="run.class.path" - 
value="${junit.jar}${path.separator}${bigdata-test.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/bigdata.jar${path.separator}${dist.lib}/colt.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/bigdata-gas${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/slf4j-log4j.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar" /> + 
value="${junit.jar}${path.separator}${bigdata-test.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/bigdata.jar${path.separator}${dist.lib}/colt.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/bigdata-gas${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/slf4j-log4j.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-client.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar" /> <echo> classpath: ${run.class.path} </echo> @@ -2282,6 +2285,7 @@ <sysproperty key="jetty-security.jar" value="${dist.lib}/jetty-security.jar" /> <sysproperty key="jetty-xml.jar" value="${dist.lib}/jetty-xml.jar" /> <sysproperty key="jetty-rewrite.jar" value="${dist.lib}/jetty-rewrite.jar" /> + <sysproperty key="jetty-client.jar" value="${dist.lib}/jetty-client.jar" /> <sysproperty key="jetty-proxy.jar" value="${dist.lib}/jetty-proxy.jar" /> <sysproperty key="servlet-api.jar" value="${dist.lib}/servlet-api.jar" /> Modified: branches/RDR/pom.xml =================================================================== --- branches/RDR/pom.xml 2014-03-16 11:24:12 UTC (rev 7985) +++ branches/RDR/pom.xml 2014-03-16 22:56:48 UTC (rev 7986) @@ -466,6 +466,11 @@ </dependency> <dependency> <groupId>org.eclipse.jetty</groupId> + <artifactId>jetty-client</artifactId> + <version>${jetty.version}</version> + </dependency> + <dependency> + <groupId>org.eclipse.jetty</groupId> <artifactId>jetty-proxy</artifactId> <version>${jetty.version}</version> </dependency> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-03-16 11:24:16
Revision: 7985 http://sourceforge.net/p/bigdata/code/7985 Author: thompsonbry Date: 2014-03-16 11:24:12 +0000 (Sun, 16 Mar 2014) Log Message: ----------- I have modified the DefaultNodeCoder in the 1.3.0 development and maintenance branch to explicitly look for a 0L in the valid childAddr slots and throw an exception. This will prevent bad data from becoming durable. I do not observe any problems in the local B+Tree test suite. I am committing this change to CI for feedback from full CI runs. See #855 (Child identity is not persistent). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/btree/data/AbstractNodeDataRecordTestCase.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java 2014-03-16 11:04:19 UTC (rev 7984) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java 2014-03-16 11:24:12 UTC (rev 7985) @@ -43,6 +43,7 @@ import com.bigdata.io.AbstractFixedByteArrayBuffer; import com.bigdata.io.DataOutputBuffer; import com.bigdata.rawstore.Bytes; +import com.bigdata.rawstore.IRawStore; /** * Default implementation for immutable {@link INodeData} records. @@ -217,9 +218,18 @@ // childAddr[] : @todo code childAddr[] (needs IAddressManager if store aware coding). // final int O_childAddr = buf.pos(); for (int i = 0; i <= nkeys; i++) { + + /* + * See #855 (Child identity is not persistent). + */ + final long childAddr = node.getChildAddr(i); - buf.putLong(node.getChildAddr(i)); + if (childAddr == IRawStore.NULL) + throw new AssertionError("Child is not persistent: index=" + i + + " out of " + nkeys + " entries, " + node.toString()); + buf.putLong(childAddr); + } // final int O_childEntryCount = buf.pos(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/btree/data/AbstractNodeDataRecordTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/btree/data/AbstractNodeDataRecordTestCase.java 2014-03-16 11:04:19 UTC (rev 7984) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/btree/data/AbstractNodeDataRecordTestCase.java 2014-03-16 11:24:12 UTC (rev 7985) @@ -79,6 +79,9 @@ final long minimumVersionTimestamp = 0L; final long maximumVersionTimestamp = 0L; + // Must not be 0L. See #855. + childAddr[0] = 12L; + final INodeData expected = new MockNodeData(new ReadOnlyKeysRaba(nkeys, keys), spannedTupleCount, childAddr, childEntryCount, hasVersionTimestamps, minimumVersionTimestamp, @@ -104,6 +107,9 @@ final long minimumVersionTimestamp = System.currentTimeMillis(); final long maximumVersionTimestamp = System.currentTimeMillis() + 20; + // Must not be 0L. See #855. + childAddr[0] = 12L; + final INodeData expected = new MockNodeData(new ReadOnlyKeysRaba(nkeys, keys), spannedTupleCount, childAddr, childEntryCount, hasVersionTimestamps, minimumVersionTimestamp, This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
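The guard above turns silent corruption into a fail-fast error at encode time: a child address equal to IRawStore.NULL (0L) means the child was never made persistent, and coding it into a durable node record would write a tree that cannot be navigated later (the symptom tracked by #855). Below is a small self-contained sketch of the same fail-fast pattern; the class and method names (NodeRecordEncoder, encodeChildAddrs) are invented for illustration and are not the bigdata API, and a plain long[] stands in for INodeData.

import java.nio.ByteBuffer;

// Illustrative stand-in for the real coder; names here are assumptions, not bigdata API.
public final class NodeRecordEncoder {

    /** Address value meaning "not persistent", mirroring IRawStore.NULL. */
    static final long NULL_ADDR = 0L;

    /**
     * Encode the child addresses of a node with nkeys keys (hence nkeys + 1 children).
     * Fails fast if any child has not yet been assigned a persistent address, so a
     * corrupt record can never reach the backing store.
     */
    static byte[] encodeChildAddrs(final long[] childAddr, final int nkeys) {
        final ByteBuffer buf = ByteBuffer.allocate((nkeys + 1) * 8 /* bytes per long */);
        for (int i = 0; i <= nkeys; i++) {
            final long addr = childAddr[i];
            if (addr == NULL_ADDR)
                throw new AssertionError("Child is not persistent: index=" + i
                        + " out of " + nkeys + " entries");
            buf.putLong(addr);
        }
        return buf.array();
    }

    public static void main(final String[] args) {
        // Mirrors the test fixture change: slot 0 must carry a non-NULL address.
        final long[] ok = { 12L, 40L, 96L };
        System.out.println(encodeChildAddrs(ok, 2).length + " bytes encoded");

        final long[] bad = { 0L, 40L, 96L };
        try {
            encodeChildAddrs(bad, 2);
        } catch (AssertionError expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}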
From: <tho...@us...> - 2014-03-16 11:04:22
Revision: 7984 http://sourceforge.net/p/bigdata/code/7984 Author: thompsonbry Date: 2014-03-16 11:04:19 +0000 (Sun, 16 Mar 2014) Log Message: ----------- Modified DumpJournal.main() to catch Throwable and log @ ERROR rather than just catching RuntimeException. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java 2014-03-16 10:55:01 UTC (rev 7983) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java 2014-03-16 11:04:19 UTC (rev 7984) @@ -311,17 +311,22 @@ } - } catch( RuntimeException ex) { + } catch( Throwable t) { - ex.printStackTrace(); + t.printStackTrace(); - System.err.println("Error: "+ex+" on file: "+file); + System.err.println("Error: " + t + " on file: " + file); + + // Abnormal completion. + System.exit(1); } System.out.println("=================================="); } + + System.out.println("Normal completion"); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
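Catching only RuntimeException let Errors escape main() without a useful message or exit status; in particular, the AssertionError described in #855 (child does not have persistent identity) is an Error, not a RuntimeException, so it would bypass the old catch entirely. Widening the catch to Throwable and calling System.exit(1) makes failures visible to scripts that invoke the dumper. A minimal sketch of the pattern, using a hypothetical per-file dump() helper that stands in for DumpJournal's real processing:

public class DumpToolSkeleton {

    // Hypothetical worker; stands in for DumpJournal's per-file processing.
    static void dump(final String file) {
        if (file.endsWith(".bad"))
            throw new AssertionError("Child is not persistent"); // an Error, not a RuntimeException
        System.out.println("dumped: " + file);
    }

    public static void main(final String[] args) {
        for (String file : args) {
            try {
                dump(file);
            } catch (Throwable t) { // Throwable also covers Errors such as AssertionError.
                t.printStackTrace();
                System.err.println("Error: " + t + " on file: " + file);
                System.exit(1); // Abnormal completion: non-zero status for calling scripts.
            }
        }
        System.out.println("Normal completion");
    }
}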
From: <tho...@us...> - 2014-03-16 10:55:04
Revision: 7983 http://sourceforge.net/p/bigdata/code/7983 Author: thompsonbry Date: 2014-03-16 10:55:01 +0000 (Sun, 16 Mar 2014) Log Message: ----------- Modified DumpJournal to track the #of errors when dumping the pages of an index and continue (unless interrupted or cancelled). See #855 (AssertionError: Child does not have persistent identity) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/PageStats.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java 2014-03-15 14:00:37 UTC (rev 7982) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java 2014-03-16 10:55:01 UTC (rev 7983) @@ -86,6 +86,7 @@ import com.bigdata.resources.OverflowManager; import com.bigdata.service.DataService; import com.bigdata.service.Split; +import com.bigdata.util.InnerCause; import com.bigdata.util.concurrent.Computable; import com.bigdata.util.concurrent.Memoizer; @@ -1537,11 +1538,29 @@ for (int i = 0; i <= nkeys; i++) { - // normal read following the node hierarchy, using cache, etc. - final AbstractNode<?> child = ((Node) node).getChild(i); + try { + + // normal read following the node hierarchy, using cache, etc. + final AbstractNode<?> child = ((Node) node).getChild(i); - // recursive dump - dumpPages(ndx, child, stats); + // recursive dump + dumpPages(ndx, child, stats); + + } catch (Throwable t) { + + if (InnerCause.isInnerCause(t, InterruptedException.class) + || InnerCause.isInnerCause(t, + InterruptedException.class)) { + throw new RuntimeException(t); + } + /* + * Log the error and track the #of errors, but keep scanning + * the index. + */ + stats.nerrors++; + log.error("Error reading child[i=" + i + "]: " + t, t); + continue; + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/PageStats.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/PageStats.java 2014-03-15 14:00:37 UTC (rev 7982) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/PageStats.java 2014-03-16 10:55:01 UTC (rev 7983) @@ -54,6 +54,8 @@ * {@link #SLOT_SIZES}. */ public long blobs; + /** The #of errors encountered during traversal. */ + public long nerrors; /** * This map is used to report the histogram of pages based on the actual * byte count of the user data in the allocation when the backing slot size @@ -126,6 +128,7 @@ sb.append(",maxLeafBytes=" + maxLeafBytes); sb.append(",bytesPerNode=" + getBytesPerNode()); sb.append(",bytesPerLeaf=" + getBytesPerLeaf()); + sb.append(",nerros=" + nerrors); final long npages = (nleaves + nnodes); for (int i = 0; i < SLOT_SIZES.length; i++) { final long slotsThisSize = histogram[i]; @@ -174,6 +177,8 @@ sb.append('\t'); sb.append("nentries"); sb.append('\t'); + sb.append("nerrors"); + sb.append('\t'); sb.append("nodeBytes"); sb.append('\t'); sb.append("leafBytes"); @@ -241,6 +246,8 @@ sb.append('\t'); sb.append(stats.ntuples); sb.append('\t'); + sb.append(stats.nerrors); + sb.append('\t'); sb.append(stats.nodeBytes); sb.append('\t'); sb.append(stats.leafBytes); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
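This change makes DumpJournal a diagnostic rather than a tool that dies on the first unreadable page: each failed child read is logged, counted in PageStats.nerrors, and the scan keeps going, while interruption still aborts the traversal. Note that the committed hunk tests InnerCause.isInnerCause(t, InterruptedException.class) twice; given the stated intent of "unless interrupted or cancelled", the second test was presumably meant for CancellationException, and the sketch below checks both, which is an assumption rather than something the diff shows. The sketch is self-contained: a tiny Node/Stats pair stands in for the real B+Tree and PageStats, and a plain instanceof replaces InnerCause's walk of the cause chain.

import java.util.concurrent.CancellationException;

public class ErrorTolerantDumpSketch {

    /** Minimal stand-in for PageStats: counts pages visited and errors seen. */
    static final class Stats {
        long npages;
        long nerrors;
        @Override
        public String toString() { return "npages=" + npages + ", nerrors=" + nerrors; }
    }

    /** Hypothetical node: a null child simulates an unreadable page. */
    static final class Node {
        final Node[] children;
        Node(final Node... children) { this.children = children; }
    }

    static void dumpPages(final Node node, final Stats stats) {
        stats.npages++;
        for (int i = 0; i < node.children.length; i++) {
            try {
                final Node child = node.children[i];
                if (child == null)
                    throw new AssertionError("Child is not persistent: index=" + i);
                // recursive dump
                dumpPages(child, stats);
            } catch (Throwable t) {
                // Interruption and cancellation must still abort the scan.
                if (t instanceof InterruptedException || t instanceof CancellationException)
                    throw new RuntimeException(t);
                // Otherwise log the error, count it, and keep scanning the index.
                stats.nerrors++;
                System.err.println("Error reading child[i=" + i + "]: " + t);
            }
        }
    }

    public static void main(final String[] args) {
        final Node root = new Node(new Node(), null, new Node(new Node(), null));
        final Stats stats = new Stats();
        dumpPages(root, stats);
        System.out.println(stats); // expect npages=4, nerrors=2 for the two null children
    }
}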