From: <tho...@us...> - 2013-12-30 16:02:25

Revision: 7701
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7701&view=rev
Author:   thompsonbry
Date:     2013-12-30 16:02:18 +0000 (Mon, 30 Dec 2013)

Log Message:
-----------
Added parallel resampling of the vertices in the join graph. See #64 (RTO)

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2013-12-30 15:36:02 UTC (rev 7700)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2013-12-30 16:02:18 UTC (rev 7701)
@@ -799,16 +799,22 @@
 
         }
 
-        for (Path x : a) {
+        // re-sample vertices.
+        sampleVertices(queryEngine, vertexLimit);
+
+//        for (Map.Entry<Vertex, AtomicInteger> e : vertexLimit.entrySet()) {
+//
+////            final Vertex v = x.vertices[0];
+////            final int limit = vertexLimit.get(v).intValue();
+//
+//            final Vertex v = e.getKey();
+//
+//            final int limit = e.getValue().get();
+//
+//            v.sample(queryEngine, limit, sampleType);
+//
+//        }
 
-            final Vertex v = x.vertices[0];
-
-            final int limit = vertexLimit.get(v).intValue();
-
-            v.sample(queryEngine, limit, sampleType);
-
-        }
-
     }
 
     /*
@@ -830,15 +836,15 @@
         int nunderflow = 0;
 
         for (Path x : a) {
 
-            /*
-             * Get the new sample limit for the path.
-             * 
-             * TODO We only need to increase the sample limit starting at the
-             * vertex where we have a cardinality underflow or variability in
-             * the cardinality estimate. This is increasing the limit in each
-             * round of expansion, which means that we are reading more data
-             * than we really need to read.
-             */
+            /*
+             * Get the new sample limit for the path.
+             * 
+             * TODO We only need to increase the sample limit starting at the
+             * vertex where we have a cardinality underflow or variability in
+             * the cardinality estimate. This is increasing the limit in each
+             * round of expansion, which means that we are reading more data
+             * than we really need to read.
+             */
             final int limit = x.getNewLimit(limitIn);
 
             // The cutoff join sample of the one step shorter path segment.
@@ -1289,10 +1295,44 @@
      */
     public void sampleAllVertices(final QueryEngine queryEngine, final int limit) {
 
+        final Map<Vertex, AtomicInteger> vertexLimit = new LinkedHashMap<Vertex, AtomicInteger>();
+
+        for (Vertex v : V) {
+
+            vertexLimit.put(v, new AtomicInteger(limit));
+
+        }
+
+        sampleVertices(queryEngine, vertexLimit);
+
+    }
+
+    /**
+     * (Re-)sample a set of vertices. Sampling is done in parallel.
+     * <p>
+     * Note: A request to re-sample a vertex is a NOP unless the limit has been
+     * increased since the last time the vertex was sampled. It is also a NOP if
+     * the vertex has been fully materialized.
+     * 
+     * @param queryEngine
+     * @param vertexLimit
+     *            A map whose keys are the {@link Vertex vertices} to be
+     *            (re-)samples and whose values are the <code>limit</code> to be
+     *            used when sampling the associated vertex. This map is
+     *            read-only so it only needs to be thread-safe for concurrent
+     *            readers.
+     */
+    private void sampleVertices(final QueryEngine queryEngine,
+            final Map<Vertex, AtomicInteger> vertexLimit) {
+
         // Setup tasks to sample vertices.
         final List<Callable<Void>> tasks = new LinkedList<Callable<Void>>();
 
-        for (Vertex v : V) {
+        for (Map.Entry<Vertex, AtomicInteger> e : vertexLimit.entrySet()) {
 
+            final Vertex v = e.getKey();
+
+            final int limit = e.getValue().get();
+
             tasks.add(new SampleVertexTask(queryEngine, v, limit, sampleType));
 
         }
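The shape of this change is a fan-out over an executor: one Callable per vertex to be (re-)sampled, submitted as a batch. Below is a minimal, self-contained sketch of that pattern; DemoVertex and the fixed-size thread pool are illustrative stand-ins for the real Vertex class and the query engine's executor service, which are not part of this commit's diff.

import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;

/** Hypothetical stand-in for com.bigdata.bop.joinGraph.rto.Vertex. */
class DemoVertex {
    final String name;
    DemoVertex(final String name) { this.name = name; }
    /** Pretend to sample up to [limit] tuples from this vertex's access path. */
    void sample(final int limit) {
        System.out.println(name + ": sampled with limit=" + limit);
    }
}

public class ParallelSamplingSketch {
    public static void main(final String[] args) throws Exception {
        // One (vertex -> limit) entry per vertex to be (re-)sampled, as in
        // JGraph.sampleVertices(...).
        final Map<DemoVertex, AtomicInteger> vertexLimit = new LinkedHashMap<>();
        vertexLimit.put(new DemoVertex("v1"), new AtomicInteger(100));
        vertexLimit.put(new DemoVertex("v2"), new AtomicInteger(200));

        // One Callable per vertex; JGraph wraps this work in SampleVertexTask.
        final List<Callable<Void>> tasks = new LinkedList<>();
        for (final Map.Entry<DemoVertex, AtomicInteger> e : vertexLimit.entrySet()) {
            final DemoVertex v = e.getKey();
            final int limit = e.getValue().get();
            tasks.add(() -> { v.sample(limit); return null; });
        }

        // invokeAll() blocks until every task is done; checking each Future
        // ensures task failures are not silently dropped.
        final ExecutorService executor = Executors.newFixedThreadPool(2);
        try {
            for (final Future<Void> f : executor.invokeAll(tasks)) {
                f.get(); // rethrows any task failure as ExecutionException.
            }
        } finally {
            executor.shutdown();
        }
    }
}

Because invokeAll() does not return until all tasks have completed, the sampling phase keeps a single synchronization point even though the per-vertex work runs concurrently.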
From: <tho...@us...> - 2013-12-30 15:36:09

Revision: 7700
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7700&view=rev
Author:   thompsonbry
Date:     2013-12-30 15:36:02 +0000 (Mon, 30 Dec 2013)

Log Message:
-----------
Added parallel expansion of the join paths. Each join path is expanded in a
separate thread. However, if there are multiple possible expansions for a given
join path, then those expansions are processed in sequence.

Note: This change required the introduction of a thread-safe collection for the
edgeSamples map within the scope of the expand() method.

@see #64 (RTO)

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/PathIds.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2013-12-30 15:05:21 UTC (rev 7699)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2013-12-30 15:36:02 UTC (rev 7700)
@@ -1004,7 +1004,7 @@
      */
     public Path[] expand(final QueryEngine queryEngine, int limitIn,
             final int round, final Path[] a,
-            final Map<PathIds, EdgeSample> edgeSamples) throws Exception {
+            Map<PathIds, EdgeSample> edgeSamples) throws Exception {
 
         if (queryEngine == null)
             throw new IllegalArgumentException();
@@ -1016,7 +1016,14 @@
             throw new IllegalArgumentException();
         if (a.length == 0)
             throw new IllegalArgumentException();
-
+
+        /*
+         * Ensure that we use a synchronized view of this collection since we
+         * will write on it from parallel threads when we expand the join paths
+         * in parallel.
+         */
+        edgeSamples = Collections.synchronizedMap(edgeSamples);
+
 //        // increment the limit by itself in each round.
 //        final int limit = (round + 1) * limitIn;
 
@@ -1031,164 +1038,214 @@
 
         if (log.isDebugEnabled())
             log.debug("Expanding paths: #paths(in)=" + a.length);
 
-        final List<Path> tmp = new LinkedList<Path>();
+        // The new set of paths to be explored.
+        final List<Path> tmpAll = new LinkedList<Path>();
 
+        // Setup tasks to expand the current join paths.
+        final List<Callable<List<Path>>> tasks = new LinkedList<Callable<List<Path>>>();
         for (Path x : a) {
 
-            /*
-             * We already increased the sample limit for the path in the loop
-             * above.
-             */
-            final int limit = x.edgeSample.limit;
+            tasks.add(new ExpandPathTask(queryEngine, x, edgeSamples));
 
-            /*
-             * The set of vertices used to expand this path in this round.
-             */
-            final Set<Vertex> used = new LinkedHashSet<Vertex>();
+        }
 
-            {
+        // Expand paths in parallel.
+        final List<Future<List<Path>>> futures = queryEngine.getIndexManager().getExecutorService()
+                .invokeAll(tasks);
+
+        // Check future, collecting new paths from each task.
+        for (Future<List<Path>> f : futures) {
 
-                /*
-                 * Any vertex which (a) does not appear in the path to be
-                 * extended; (b) has not already been used to extend the path;
-                 * and (c) does not share any variables indirectly via
-                 * constraints is added to this collection.
-                 * 
-                 * If we are not able to extend the path at least once using a
-                 * constrained join then we will use this collection as the
-                 * source of unconnected edges which need to be used to extend
-                 * the path.
-                 */
-                final Set<Vertex> nothingShared = new LinkedHashSet<Vertex>();
-
-                // Consider all vertices.
-                for (Vertex tVertex : V) {
+            tmpAll.addAll(f.get());
+
+        }
 
-                    // Figure out which vertices are already part of this path.
-                    final boolean vFound = x.contains(tVertex);
+        /*
+         * Now examine the set of generated and sampled join paths. If any paths
+         * span the same vertices then they are alternatives and we can pick the
+         * best alternative now and prune the other alternatives for those
+         * vertices.
+         */
+        final Path[] paths_tp1 = tmpAll.toArray(new Path[tmpAll.size()]);
 
-                    if (vFound) {
-                        // Vertex is already part of this path.
-                        if (log.isTraceEnabled())
-                            log.trace("Vertex: " + tVertex
-                                    + " - already part of this path.");
-                        continue;
-                    }
+        final Path[] paths_tp1_pruned = pruneJoinPaths(paths_tp1, edgeSamples);
 
-                    if (used.contains(tVertex)) {
-                        // Vertex already used to extend this path.
-                        if (log.isTraceEnabled())
-                            log
-                                    .trace("Vertex: "
-                                            + tVertex
-                                            + " - already used to extend this path.");
-                        continue;
-                    }
+        if (log.isDebugEnabled()) // shows which paths were pruned.
+            log.info("\n*** round=" + round + ": paths{in=" + a.length
+                    + ",considered=" + paths_tp1.length + ",out="
+                    + paths_tp1_pruned.length + "}\n"
+                    + JGraph.showTable(paths_tp1, paths_tp1_pruned));
 
-                    // FIXME RTO: Replace with StaticAnalysis.
-                    if (!PartitionedJoinGroup.canJoinUsingConstraints(//
-                            x.getPredicates(),// path
-                            tVertex.pred,// vertex
-                            C// constraints
-                            )) {
-                        /*
-                         * Vertex does not share variables either directly
-                         * or indirectly.
-                         */
-                        if (log.isTraceEnabled())
-                            log
-                                    .trace("Vertex: "
-                                            + tVertex
-                                            + " - unconstrained join for this path.");
-                        nothingShared.add(tVertex);
-                        continue;
-                    }
+        if (log.isInfoEnabled()) // only shows the surviving paths.
+            log.info("\n*** round=" + round
+                    + ": paths{in=" + a.length + ",considered="
+                    + paths_tp1.length + ",out=" + paths_tp1_pruned.length
+                    + "}\n" + JGraph.showTable(paths_tp1_pruned));
 
-                    // add the new vertex to the set of used vertices.
-                    used.add(tVertex);
+        return paths_tp1_pruned;
 
-                    // Extend the path to the new vertex.
-                    final Path p = x
-                            .addEdge(queryEngine, limit, tVertex, /* dynamicEdge, */
-                                    C, x.getVertexCount() + 1 == V.length/* pathIsComplete */);
+    }
 
-                    // Add to the set of paths for this round.
-                    tmp.add(p);
+    /**
+     * Task expands a path by one edge into one or more new paths.
+     * 
+     * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+     */
+    private class ExpandPathTask implements Callable<List<Path>> {
 
-                    // Record the sample for the new path.
-                    if (edgeSamples.put(new PathIds(p.getVertexIds()),
-                            p.edgeSample) != null)
-                        throw new AssertionError();
+        private final QueryEngine queryEngine;
+        private final Path x;
+        /**
+         * Note: The collection provided by the caller MUST be thread-safe since
+         * this task will be run by parallel threads over the different join
+         * paths from the last round. There will not be any conflict over writes
+         * on this map since each {@link PathIds} instance resulting from the
+         * expansion will be unique, but we still need to use a thread-safe
+         * collection since there will be concurrent modifications to this map.
+         */
+        private final Map<PathIds, EdgeSample> edgeSamples;
 
-                    if (log.isTraceEnabled())
-                        log.trace("Extended path with dynamic edge: vnew="
-                                + tVertex.pred.getId() + ", new path=" + p);
+        public ExpandPathTask(final QueryEngine queryEngine, final Path x,
+                final Map<PathIds, EdgeSample> edgeSamples) {
+            this.queryEngine = queryEngine;
+            this.x = x;
+            this.edgeSamples = edgeSamples;
+        }
+
+        @Override
+        public List<Path> call() throws Exception {
+            /*
+             * We already increased the sample limit for the path in the loop
+             * above.
+             */
+            final int limit = x.edgeSample.limit;
 
-                } // next vertex.
+            /*
+             * The set of vertices used to expand this path in this round.
+             */
+            final Set<Vertex> used = new LinkedHashSet<Vertex>();
 
-                if (tmp.isEmpty()) {
+            /*
+             * Any vertex which (a) does not appear in the path to be
+             * extended; (b) has not already been used to extend the path;
+             * and (c) does not share any variables indirectly via
+             * constraints is added to this collection.
+             * 
+             * If we are not able to extend the path at least once using a
+             * constrained join then we will use this collection as the
+             * source of unconnected edges which need to be used to extend
+             * the path.
+             */
+            final Set<Vertex> nothingShared = new LinkedHashSet<Vertex>();
+
+            // The new set of paths to be explored as extensions to this path.
+            final List<Path> tmp = new LinkedList<Path>();
+
+            // Consider all vertices.
+            for (Vertex tVertex : V) {
 
-                    /*
-                     * No constrained joins were identified so we must consider
-                     * edges which represent fully unconstrained joins.
-                     */
+                // Figure out which vertices are already part of this path.
+                final boolean vFound = x.contains(tVertex);
 
-                    assert !nothingShared.isEmpty();
+                if (vFound) {
+                    // Vertex is already part of this path.
+                    if (log.isTraceEnabled())
+                        log.trace("Vertex: " + tVertex
+                                + " - already part of this path.");
+                    continue;
+                }
+                if (used.contains(tVertex)) {
+                    // Vertex already used to extend this path.
+                    if (log.isTraceEnabled())
+                        log
                                .trace("Vertex: "
                                        + tVertex
                                        + " - already used to extend this path.");
+                    continue;
+                }
+
+                // FIXME RTO: Replace with StaticAnalysis.
+                if (!PartitionedJoinGroup.canJoinUsingConstraints(//
+                        x.getPredicates(),// path
+                        tVertex.pred,// vertex
+                        C// constraints
+                        )) {
                     /*
-                     * Choose any vertex from the set of those which do
-                     * not share any variables with the join path. Since
-                     * all of these are fully unconstrained joins we do
-                     * not want to expand the join path along multiple
-                     * edges in this iterator, just along a single
-                     * unconstrained edge.
+                     * Vertex does not share variables either directly
+                     * or indirectly.
                      */
-                    final Vertex tVertex = nothingShared.iterator().next();
-
-                    // Extend the path to the new vertex.
-                    final Path p = x
-                            .addEdge(queryEngine, limit, tVertex,/* dynamicEdge */
-                                    C, x.getVertexCount() + 1 == V.length/* pathIsComplete */);
+                    if (log.isTraceEnabled())
+                        log
                                .trace("Vertex: "
                                        + tVertex
                                        + " - unconstrained join for this path.");
+                    nothingShared.add(tVertex);
+                    continue;
+                }
 
-                    // Add to the set of paths for this round.
-                    tmp.add(p);
+                // add the new vertex to the set of used vertices.
+                used.add(tVertex);
 
-                    if (log.isTraceEnabled())
-                        log.trace("Extended path with dynamic edge: vnew="
-                                + tVertex.pred.getId() + ", new path=" + p);
+                // Extend the path to the new vertex.
+                final Path p = x
+                        .addEdge(queryEngine, limit, tVertex, /* dynamicEdge, */
+                                C, x.getVertexCount() + 1 == V.length/* pathIsComplete */);
 
-                }
+                // Add to the set of paths for this round.
+                tmp.add(p);
 
-            }
+                // Record the sample for the new path.
+                if (edgeSamples.put(new PathIds(p.getVertexIds()),
+                        p.edgeSample) != null)
+                    throw new AssertionError();
 
-        } // next path
+                if (log.isTraceEnabled())
+                    log.trace("Extended path with dynamic edge: vnew="
+                            + tVertex.pred.getId() + ", new path=" + p);
 
-        /*
-         * Now examine the set of generated and sampled join paths. If any paths
-         * span the same vertices then they are alternatives and we can pick the
-         * best alternative now and prune the other alternatives for those
-         * vertices.
-         */
-        final Path[] paths_tp1 = tmp.toArray(new Path[tmp.size()]);
+            } // next target vertex.
 
-        final Path[] paths_tp1_pruned = pruneJoinPaths(paths_tp1, edgeSamples);
+            if (tmp.isEmpty()) {
 
-        if (log.isDebugEnabled()) // shows which paths were pruned.
-            log.info("\n*** round=" + round + ": paths{in=" + a.length
-                    + ",considered=" + paths_tp1.length + ",out="
-                    + paths_tp1_pruned.length + "}\n"
-                    + JGraph.showTable(paths_tp1, paths_tp1_pruned));
+                /*
+                 * No constrained joins were identified as extensions of this
+                 * join path, so we must consider edges which represent fully
+                 * unconstrained joins.
+                 */
 
-        if (log.isInfoEnabled()) // only shows the surviving paths.
-            log.info("\n*** round=" + round
-                    + ": paths{in=" + a.length + ",considered="
-                    + paths_tp1.length + ",out=" + paths_tp1_pruned.length
-                    + "}\n" + JGraph.showTable(paths_tp1_pruned));
+                assert !nothingShared.isEmpty();
 
-        return paths_tp1_pruned;
+                /*
+                 * Choose any vertex from the set of those which do
+                 * not share any variables with the join path. Since
+                 * all of these are fully unconstrained joins we do
+                 * not want to expand the join path along multiple
+                 * edges in this iterator, just along a single
+                 * unconstrained edge.
+                 */
+                final Vertex tVertex = nothingShared.iterator().next();
+
+                // Extend the path to the new vertex.
+                final Path p = x
+                        .addEdge(queryEngine, limit, tVertex,/* dynamicEdge */
+                                C, x.getVertexCount() + 1 == V.length/* pathIsComplete */);
+                // Add to the set of paths for this round.
+                tmp.add(p);
+
+                if (log.isTraceEnabled())
+                    log.trace("Extended path with dynamic edge: vnew="
+                            + tVertex.pred.getId() + ", new path=" + p);
+
+            } // if(tmp.isEmpty())
+
+            return tmp;
+
+        }
+
+    }
-    
+
     /**
      * Return the {@link Vertex} whose {@link IPredicate} is associated with
     * the given {@link BOp.Annotations#BOP_ID}.

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/PathIds.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/PathIds.java 2013-12-30 15:05:21 UTC (rev 7699)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/PathIds.java 2013-12-30 15:36:02 UTC (rev 7700)
@@ -83,6 +83,7 @@
      * vertices may be expressed and also recognizes that the vertex hash codes
      * are based on the bop ids, which are often small integers.
      */
+    @Override
     public int hashCode() {
 
         int h = hash;
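The interesting concurrency detail in this commit is that edgeSamples is wrapped with Collections.synchronizedMap() even though each ExpandPathTask writes under a distinct PathIds key. The sketch below illustrates why, using only JDK types: distinct keys avoid logical write conflicts, but concurrent structural modification of an unsynchronized HashMap is still unsafe, so a synchronized wrapper (or a ConcurrentHashMap) is required. All names here are illustrative.

import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SynchronizedMapSketch {
    public static void main(final String[] args) throws Exception {
        // The caller's map is wrapped once, before the parallel phase, just
        // as expand() wraps edgeSamples with Collections.synchronizedMap().
        final Map<String, Integer> samples =
                Collections.synchronizedMap(new HashMap<String, Integer>());

        // Each task writes under a key that no other task produces. Even so,
        // a plain HashMap could be corrupted by concurrent structural
        // modification, hence the synchronized wrapper.
        final List<Callable<Void>> tasks = new LinkedList<>();
        for (int i = 0; i < 8; i++) {
            final String key = "path-" + i; // unique per task
            tasks.add(() -> {
                samples.put(key, key.length());
                return null;
            });
        }

        final ExecutorService executor = Executors.newFixedThreadPool(4);
        try {
            for (final Future<Void> f : executor.invokeAll(tasks)) {
                f.get(); // surface any task failure
            }
        } finally {
            executor.shutdown();
        }
        System.out.println("entries=" + samples.size()); // entries=8
    }
}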
From: <tho...@us...> - 2013-12-30 15:05:28

Revision: 7699
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7699&view=rev
Author:   thompsonbry
Date:     2013-12-30 15:05:21 +0000 (Mon, 30 Dec 2013)

Log Message:
-----------
Modified Name2Addr.handleCommit() to use the ExecutionExceptions pattern and to
log each error. See #788 (Name2Addr does not report all root causes if the
commit fails)

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2013-12-30 15:02:24 UTC (rev 7698)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2013-12-30 15:05:21 UTC (rev 7699)
@@ -36,6 +36,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Locale;
 import java.util.Properties;
@@ -80,6 +81,7 @@
 import com.bigdata.rawstore.IRawStore;
 import com.bigdata.resources.IndexManager;
 import com.bigdata.resources.ResourceManager;
+import com.bigdata.util.concurrent.ExecutionExceptions;
 import com.ibm.icu.text.Collator;
 
 import cutthecrap.utils.striterators.Filter;
@@ -738,6 +740,7 @@
 
         } // for each entry in the snapshot of the commit list.
 
+        final List<Throwable> causes = new LinkedList<Throwable>();
         for (Future<CommitIndexTask> f : futures) {
 
             try {
@@ -775,11 +778,13 @@
 
             } catch (InterruptedException e) {
 
-                throw new RuntimeException(e);
+                log.error("l.name: " + e, e);
+                causes.add(e);
 
             } catch (ExecutionException e) {
 
-                throw new RuntimeException(e);
+                log.error("l.name: " + e, e);
+                causes.add(e);
 
             }
 
@@ -842,6 +847,17 @@
 
 //            // set commitTime on the btree (transient field).
 //            l.btree.setLastCommitTime(commitTime);
 
+        } // next Future.
+
+        /*
+         * If there were any errors, then throw an exception listing them.
+         */
+        if (!causes.isEmpty()) {
+            // Throw exception back to the leader.
+            if (causes.size() == 1)
+                throw new RuntimeException(causes.get(0));
+            throw new RuntimeException("nerrors=" + causes.size(),
+                    new ExecutionExceptions(causes));
+        }
 
         // and flushes out this btree as well.
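The "ExecutionExceptions pattern" referenced in the log message amounts to draining every Future before reporting failure, so that one bad index commit cannot mask the others. A minimal sketch of the same idea using only JDK types follows; com.bigdata.util.concurrent.ExecutionExceptions itself is not shown in the diff, so suppressed exceptions stand in for it here.

import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GatherAllCausesSketch {
    public static void main(final String[] args) throws InterruptedException {
        final ExecutorService executor = Executors.newFixedThreadPool(4);
        final List<Future<Void>> futures = new LinkedList<>();
        for (int i = 0; i < 4; i++) {
            final int n = i;
            final Callable<Void> task = () -> {
                if (n % 2 == 0) // two of the four tasks fail
                    throw new IllegalStateException("task " + n + " failed");
                return null;
            };
            futures.add(executor.submit(task));
        }

        // Drain every Future before reporting, instead of rethrowing on the
        // first failure; this is what makes all root causes visible.
        final List<Throwable> causes = new LinkedList<>();
        for (final Future<Void> f : futures) {
            try {
                f.get();
            } catch (ExecutionException e) {
                causes.add(e.getCause());
            }
        }
        executor.shutdown();

        if (!causes.isEmpty()) {
            if (causes.size() == 1)
                throw new RuntimeException(causes.get(0));
            // bigdata throws ExecutionExceptions(causes) here; a plain
            // RuntimeException carrying suppressed exceptions stands in.
            final RuntimeException ex = new RuntimeException("nerrors=" + causes.size());
            for (final Throwable t : causes)
                ex.addSuppressed(t);
            throw ex;
        }
    }
}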
From: <tho...@us...> - 2013-12-30 15:02:32

Revision: 7698
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7698&view=rev
Author:   thompsonbry
Date:     2013-12-30 15:02:24 +0000 (Mon, 30 Dec 2013)

Log Message:
-----------
- Adjusted defaults for the RTO in QueryHints.
- Added the limit, sampleType, and nedges to the explain view for the JoinGraph
  operator.
- Added parallel sampling of vertices to the JGraph.

See #64

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2013-12-30 14:28:09 UTC (rev 7697)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2013-12-30 15:02:24 UTC (rev 7698)
@@ -1162,9 +1162,13 @@
             w.write(cdata(Arrays.toString(vars)));
         }
         if (bop instanceof JoinGraph) {
-            final Path p = ((JoinGraph) bop).getPath(q);
-            final Map<PathIds, EdgeSample> samples = ((JoinGraph) bop)
+            final JoinGraph t = ((JoinGraph) bop);
+            final Path p = t.getPath(q);
+            final Map<PathIds, EdgeSample> samples = t
                     .getSamples(q);
+            w.write(cdata("sampleType=" + t.getSampleType()));
+            w.write(cdata(", limit=" + t.getLimit()));
+            w.write(cdata(", nedges=" + t.getNEdges()));
             if (p != null && samples != null) {
                 // Show the RTO discovered join path.
                 w.write("<pre>");

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2013-12-30 14:28:09 UTC (rev 7697)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2013-12-30 15:02:24 UTC (rev 7698)
@@ -37,6 +37,9 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.log4j.Logger;
@@ -50,6 +53,7 @@
 import com.bigdata.bop.joinGraph.NoSolutionsException;
 import com.bigdata.bop.joinGraph.PartitionedJoinGroup;
 import com.bigdata.bop.rdf.join.DataSetJoin;
+import com.bigdata.util.concurrent.ExecutionExceptions;
 
 /**
  * A runtime optimizer for a join graph. The {@link JoinGraph} bears some
@@ -1225,19 +1229,91 @@
      *            not share a variable directly and hence will materialize the
      *            full cross product before filtering which is *really*
      *            expensive.
-     * 
      */
     public void sampleAllVertices(final QueryEngine queryEngine, final int limit) {
 
+        // Setup tasks to sample vertices.
+        final List<Callable<Void>> tasks = new LinkedList<Callable<Void>>();
         for (Vertex v : V) {
 
-            v.sample(queryEngine, limit, sampleType);
+            tasks.add(new SampleVertexTask(queryEngine, v, limit, sampleType));
 
         }
 
+        // Sample vertices in parallel.
+        final List<Future<Void>> futures;
+        try {
+
+            futures = queryEngine.getIndexManager().getExecutorService()
+                    .invokeAll(tasks);
+
+        } catch (InterruptedException e) {
+            // propagate interrupt.
+            Thread.currentThread().interrupt();
+            return;
+        }
+
+        // Check futures for errors.
+        final List<Throwable> causes = new LinkedList<Throwable>();
+        for (Future<Void> f : futures) {
+            try {
+                f.get();
+            } catch (InterruptedException e) {
+                log.error(e);
+                causes.add(e);
+            } catch (ExecutionException e) {
+                log.error(e);
+                causes.add(e);
+            }
+        }
+
+        /*
+         * If there were any errors, then throw an exception listing them.
+         */
+        if (!causes.isEmpty()) {
+            // Throw exception back to the leader.
+            if (causes.size() == 1)
+                throw new RuntimeException(causes.get(0));
+            throw new RuntimeException("nerrors=" + causes.size(),
+                    new ExecutionExceptions(causes));
+        }
+
     }
 
     /**
+     * Task to sample a vertex.
+     * 
+     * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+     */
+    static private class SampleVertexTask implements Callable<Void> {
+
+        private final QueryEngine queryEngine;
+        private final Vertex v;
+        private final int limit;
+        private final SampleType sampleType;
+
+        public SampleVertexTask(final QueryEngine queryEngine, final Vertex v,
+                final int limit, final SampleType sampleType) {
+
+            this.queryEngine = queryEngine;
+            this.v = v;
+            this.limit = limit;
+            this.sampleType = sampleType;
+
+        }
+
+        @Override
+        public Void call() throws Exception {
+
+            v.sample(queryEngine, limit, sampleType);
+
+            return null;
+        }
+
+    }
+
+    /**
      * Estimate the cardinality of each edge. This is only invoked by
     * {@link #round0(QueryEngine, int, int)} when it is trying to select the
     * minimum cardinality edges which it will use to create the initial set of
@@ -1362,9 +1438,9 @@
 
                 paths.add(p);
 
-            }
+            } // next other vertex.
 
-        }
+        } // next vertex
 
         return paths.toArray(new Path[paths.size()]);
 
@@ -1393,7 +1469,7 @@
      */
     public Path[] pruneJoinPaths(final Path[] a,
             final Map<PathIds, EdgeSample> edgeSamples) {
-        final boolean neverPruneUnderflow = true;
+    	final boolean neverPruneUnderflow = true;
 
         /*
          * Find the length of the longest path(s). All shorter paths are
         * dropped in each round.

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java 2013-12-30 14:28:09 UTC (rev 7697)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java 2013-12-30 15:02:24 UTC (rev 7698)
@@ -111,10 +111,18 @@
      * evaluation (default {@value #DEFAULT_RTO_LIMIT}). A larger limit and a
      * random sample will provide a more accurate estimate of the cost of the
     * join paths but are increase the runtime overhead of the RTO optimizer.
+     * Smaller value can lead to underflow in the cardinality estimates of the
+     * cutoff joins resulting in a longer execution time for the RTO since more
+     * paths may be explored or the explored paths must be deepened in order to
+     * differentiate their costs. Values corresponding to up to the expected
+     * number of triples on an index page should have the same IO cost since
+     * there will be a single page read for the vertex and the output of the
+     * join will be cutoff once the desired number of join results has been
+     * produced.
      */
     String RTO_LIMIT = "RTO-limit";
 
-    int DEFAULT_RTO_LIMIT = 20;
+    int DEFAULT_RTO_LIMIT = 100;
 
     /**
      * The <i>nedges</i> edges of the join graph having the lowest cardinality
@@ -124,11 +132,14 @@
      * <i>nedges</i> of those edges having the lowest cardinality are used to
      * form the initial set of join paths. For each edge selected to form a join
      * path, the starting vertex will be the vertex of that edge having the
-     * lower cardinality.
+     * lower cardinality. If ONE (1), then only those join paths that start with
+     * the two vertices having the lowest cardinality will be explored (this was
+     * the published behavior for ROX). When greater than ONE, a broader search
+     * of the join paths will be carried out.
      */
     String RTO_NEDGES = "RTO-nedges";
 
-    int DEFAULT_RTO_NEDGES = 2;
+    int DEFAULT_RTO_NEDGES = 1;
 
     /**
      * Query hint sets the optimistic threshold for the static join order
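Both RTO-limit and RTO-nedges are resolved with a lookup-with-default, as the next commit's AST2BOpRTO change shows (joinGroup.getProperty(hint, default)). A simplified sketch of that resolution pattern follows; the plain Map stands in for the AST node's annotations and is an assumption of this sketch, not bigdata's actual API.

import java.util.HashMap;
import java.util.Map;

public class HintLookupSketch {

    static final String RTO_LIMIT = "RTO-limit"; // from QueryHints in the diff
    static final int DEFAULT_RTO_LIMIT = 100;    // new default (was 20)

    /** Return the hint value if present, else the compiled-in default. */
    @SuppressWarnings("unchecked")
    static <T> T getProperty(final Map<String, Object> hints,
            final String name, final T defaultValue) {
        final Object v = hints.get(name);
        return v == null ? defaultValue : (T) v;
    }

    public static void main(final String[] args) {
        final Map<String, Object> joinGroupHints = new HashMap<>();
        // No explicit hint: falls back to the default.
        System.out.println(getProperty(joinGroupHints, RTO_LIMIT, DEFAULT_RTO_LIMIT)); // 100
        // Explicit hint on the join group overrides the default.
        joinGroupHints.put(RTO_LIMIT, 500);
        System.out.println(getProperty(joinGroupHints, RTO_LIMIT, DEFAULT_RTO_LIMIT)); // 500
    }
}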
From: <tho...@us...> - 2013-12-30 14:28:15

Revision: 7697
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7697&view=rev
Author:   thompsonbry
Date:     2013-12-30 14:28:09 +0000 (Mon, 30 Dec 2013)

Log Message:
-----------
added summary for projection in explain view.

modified RTO integration to pass through the values for limit and nedges from
the query hints to the RTO.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2013-12-30 13:41:38 UTC (rev 7696)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2013-12-30 14:28:09 UTC (rev 7697)
@@ -57,6 +57,7 @@
 import com.bigdata.bop.joinGraph.rto.Path;
 import com.bigdata.bop.joinGraph.rto.PathIds;
 import com.bigdata.bop.rdf.join.ChunkedMaterializationOp;
+import com.bigdata.bop.solutions.ProjectionOp;
 import com.bigdata.counters.render.XHTMLRenderer;
 import com.bigdata.rawstore.Bytes;
 import com.bigdata.rdf.sparql.ast.eval.AST2BOpJoins;
@@ -1079,62 +1080,39 @@
 
         }
         w.write(TDx);
 
-        // summary
+        // operator summary (not shown for the "total" line).
         w.write(TD);
-        if (pred != null) {
-            w.write(cdata(pred.getClass().getSimpleName()));
-            w.write(cdata("[" + predId + "]("));
-            final Iterator<BOp> itr = pred.argIterator();
-            boolean first = true;
-            while (itr.hasNext()) {
-                if (first) {
-                    first = false;
-                } else
-                    w.write(cdata(", "));
-                final IVariableOrConstant<?> x = (IVariableOrConstant<?>) itr
-                        .next();
-                if (x.isVar()) {
-                    w.write(cdata("?"));
-                    w.write(cdata(x.getName()));
-                } else {
-                    w.write(cdata(x.get().toString()));
-                    //sb.append(((IV)x.get()).getValue());
-                }
-            }
-            w.write(cdata(")"));
-        }
-        if (bop.getProperty(NamedSetAnnotations.NAMED_SET_REF) != null) {
-            /*
-             * Named Solution Set(s) summary.
-             */
-            final Object namedSetRef = bop
-                    .getProperty(NamedSetAnnotations.NAMED_SET_REF);
-            if (namedSetRef instanceof INamedSolutionSetRef) {
-                final INamedSolutionSetRef ref = (INamedSolutionSetRef) namedSetRef;
-                final IRunningQuery t = getRunningQuery(q, ref.getQueryId());
-                if (t != null) {
-                    final IQueryAttributes attrs = t == null ? null : t
-                            .getAttributes();
-                    final IHashJoinUtility state = (IHashJoinUtility) (attrs == null ? null
-                            : attrs.get(ref));
-                    if (state != null) {
-                        // Prefer the IHashUtilityState
-                        w.write(cdata(state.toString()));
-                        w.write(cdata(",namedSet="));
-                        w.write(cdata(ref.getLocalName()));
+        if (!summary) {
+            if (pred != null) {
+                w.write(cdata(pred.getClass().getSimpleName()));
+                w.write(cdata("[" + predId + "]("));
+                final Iterator<BOp> itr = pred.argIterator();
+                boolean first = true;
+                while (itr.hasNext()) {
+                    if (first) {
+                        first = false;
+                    } else
+                        w.write(cdata(", "));
+                    final IVariableOrConstant<?> x = (IVariableOrConstant<?>) itr
+                            .next();
+                    if (x.isVar()) {
+                        w.write(cdata("?"));
+                        w.write(cdata(x.getName()));
                     } else {
-                        // Otherwise the NamedSolutionSetRef
-                        w.write(cdata(ref.toString()));
+                        w.write(cdata(x.get().toString()));
+                        //sb.append(((IV)x.get()).getValue());
                     }
-                    // w.write(cdata(", joinvars=" +
-                    // Arrays.toString(ref.joinVars)));
                 }
-            } else {
-                final INamedSolutionSetRef[] refs = (INamedSolutionSetRef[]) namedSetRef;
-                for (int i = 0; i < refs.length; i++) {
-                    final INamedSolutionSetRef ref = refs[i];
-                    if (i > 0)
-                        w.write(cdata(","));
+                w.write(cdata(")"));
+            }
+            if (bop.getProperty(NamedSetAnnotations.NAMED_SET_REF) != null) {
+                /*
+                 * Named Solution Set(s) summary.
+                 */
+                final Object namedSetRef = bop
+                        .getProperty(NamedSetAnnotations.NAMED_SET_REF);
+                if (namedSetRef instanceof INamedSolutionSetRef) {
+                    final INamedSolutionSetRef ref = (INamedSolutionSetRef) namedSetRef;
                     final IRunningQuery t = getRunningQuery(q, ref.getQueryId());
                     if (t != null) {
                         final IQueryAttributes attrs = t == null ? null : t
@@ -1144,31 +1122,61 @@
                         if (state != null) {
                             // Prefer the IHashUtilityState
                             w.write(cdata(state.toString()));
+                            w.write(cdata(",namedSet="));
+                            w.write(cdata(ref.getLocalName()));
                         } else {
                             // Otherwise the NamedSolutionSetRef
                             w.write(cdata(ref.toString()));
                         }
+                        // w.write(cdata(", joinvars=" +
+                        // Arrays.toString(ref.joinVars)));
                     }
-                    // w.write(cdata(", joinvars=" +
-                    // Arrays.toString(refs[0].joinVars)));
+                } else {
+                    final INamedSolutionSetRef[] refs = (INamedSolutionSetRef[]) namedSetRef;
+                    for (int i = 0; i < refs.length; i++) {
+                        final INamedSolutionSetRef ref = refs[i];
+                        if (i > 0)
+                            w.write(cdata(","));
+                        final IRunningQuery t = getRunningQuery(q, ref.getQueryId());
+                        if (t != null) {
+                            final IQueryAttributes attrs = t == null ? null : t
+                                    .getAttributes();
+                            final IHashJoinUtility state = (IHashJoinUtility) (attrs == null ? null
+                                    : attrs.get(ref));
+                            if (state != null) {
+                                // Prefer the IHashUtilityState
+                                w.write(cdata(state.toString()));
+                            } else {
+                                // Otherwise the NamedSolutionSetRef
+                                w.write(cdata(ref.toString()));
+                            }
+                        }
+                        // w.write(cdata(", joinvars=" +
+                        // Arrays.toString(refs[0].joinVars)));
+                    }
                 }
             }
-        }
-        if (bop instanceof ChunkedMaterializationOp) {
-            final IVariable<?>[] vars = (IVariable<?>[]) bop
-                    .getProperty(ChunkedMaterializationOp.Annotations.VARS);
-            w.write(cdata(Arrays.toString(vars)));
-        }
-        if (bop instanceof JoinGraph) {
-            final Path p = ((JoinGraph) bop).getPath(q);
-            final Map<PathIds, EdgeSample> samples = ((JoinGraph) bop)
-                    .getSamples(q);
-            if (p != null && samples != null) {
-                // Show the RTO discovered join path.
-                w.write("<pre>");
-                w.write(cdata(JGraph.showPath(p, samples)));
-                w.write("</pre>");
+            if (bop instanceof ChunkedMaterializationOp) {
+                final IVariable<?>[] vars = (IVariable<?>[]) bop
+                        .getProperty(ChunkedMaterializationOp.Annotations.VARS);
+                w.write(cdata(Arrays.toString(vars)));
             }
+            if (bop instanceof JoinGraph) {
+                final Path p = ((JoinGraph) bop).getPath(q);
+                final Map<PathIds, EdgeSample> samples = ((JoinGraph) bop)
+                        .getSamples(q);
+                if (p != null && samples != null) {
+                    // Show the RTO discovered join path.
+                    w.write("<pre>");
+                    w.write(cdata(JGraph.showPath(p, samples)));
+                    w.write("</pre>");
+                }
+            }
+            if (bop instanceof ProjectionOp) {
+                final IVariable<?>[] vars = (IVariable<?>[]) bop
+                        .getProperty(ProjectionOp.Annotations.SELECT);
+                w.write(cdata(Arrays.toString(vars)));
+            }
         }
         w.write(TDx); // end summary

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2013-12-30 13:41:38 UTC (rev 7696)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2013-12-30 14:28:09 UTC (rev 7697)
@@ -232,6 +232,10 @@
          */
        final SampleType sampleType = joinGroup.getProperty(
                QueryHints.RTO_SAMPLE_TYPE, QueryHints.DEFAULT_RTO_SAMPLE_TYPE);
+        final int limit = joinGroup.getProperty(QueryHints.RTO_LIMIT,
+                QueryHints.DEFAULT_RTO_LIMIT);
+        final int nedges = joinGroup.getProperty(QueryHints.RTO_NEDGES,
+                QueryHints.DEFAULT_RTO_NEDGES);
        left = new JoinGraph(leftOrEmpty(left),//
                new NV(BOp.Annotations.BOP_ID, ctx.nextId()),//
                new NV(BOp.Annotations.EVALUATION_CONTEXT,
@@ -245,10 +249,8 @@
                        preds.toArray(new Predicate[preds.size()])),//
                new NV(JoinGraph.Annotations.CONSTRAINTS, constraints
                        .toArray(new IConstraint[constraints.size()])),//
-                new NV(JoinGraph.Annotations.LIMIT,
-                        JoinGraph.Annotations.DEFAULT_LIMIT),//
-                new NV(JoinGraph.Annotations.NEDGES,
-                        JoinGraph.Annotations.DEFAULT_NEDGES),//
+                new NV(JoinGraph.Annotations.LIMIT, limit),//
+                new NV(JoinGraph.Annotations.NEDGES, nedges),//
                new NV(JoinGraph.Annotations.SAMPLE_TYPE, sampleType.name())//
        );
From: <tho...@us...> - 2013-12-30 13:41:47
|
Revision: 7696 http://bigdata.svn.sourceforge.net/bigdata/?rev=7696&view=rev Author: thompsonbry Date: 2013-12-30 13:41:38 +0000 (Mon, 30 Dec 2013) Log Message: ----------- BOpBase - @Override annotations. QueryLog - pretty print of the RTO computed join path. JGraph - exposed the edge samples to JoinGraph operator. See #64 (RTO). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Path.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/SampleBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpBase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpBase.java 2013-12-30 12:27:34 UTC (rev 7695) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpBase.java 2013-12-30 13:41:38 UTC (rev 7696) @@ -175,6 +175,7 @@ } + @Override final public Map<String, Object> annotations() { return Collections.unmodifiableMap(annotations); @@ -234,6 +235,7 @@ } + @Override public BOp get(final int index) { return args[index]; @@ -286,6 +288,7 @@ } + @Override public int arity() { return args.length; @@ -297,6 +300,7 @@ * <p> * Note: This is much less efficient than {@link #argIterator()}. */ + @Override final public List<BOp> args() { return Collections.unmodifiableList(Arrays.asList(args)); @@ -309,6 +313,7 @@ * The iterator does not support removal. (This is more efficient than * #args()). 
*/ + @Override final public Iterator<BOp> argIterator() { return new ArgIterator(); @@ -339,6 +344,7 @@ } // shallow copy + @Override public BOp[] toArray() { final BOp[] a = new BOp[args.length]; @@ -475,6 +481,7 @@ // // } + @Override public Object getProperty(final String name) { return annotations.get(name); @@ -543,6 +550,7 @@ } + @Override public BOpBase setProperty(final String name, final Object value) { final BOpBase tmp = (BOpBase) this.clone(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2013-12-30 12:27:34 UTC (rev 7695) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2013-12-30 13:41:38 UTC (rev 7696) @@ -51,6 +51,11 @@ import com.bigdata.bop.join.IHashJoinUtility; import com.bigdata.bop.join.PipelineJoin; import com.bigdata.bop.join.PipelineJoinStats; +import com.bigdata.bop.joinGraph.rto.EdgeSample; +import com.bigdata.bop.joinGraph.rto.JGraph; +import com.bigdata.bop.joinGraph.rto.JoinGraph; +import com.bigdata.bop.joinGraph.rto.Path; +import com.bigdata.bop.joinGraph.rto.PathIds; import com.bigdata.bop.rdf.join.ChunkedMaterializationOp; import com.bigdata.counters.render.XHTMLRenderer; import com.bigdata.rawstore.Bytes; @@ -768,9 +773,9 @@ w.write("<th>evalOrder</th>"); // [0..n-1] if (clusterStats) { w.write("<th>evalContext</th>"); - w.write("<th>controller</th>"); } if (detailedStats) { + w.write("<th>controller</th>"); w.write("<th>bopId</th>"); w.write("<th>predId</th>"); } @@ -996,9 +1001,9 @@ w.write(TDx); if (clusterStats) { w.write(TD); w.write(TDx); // evalContext - w.write(TD); w.write(TDx); // controller? } if (detailedStats) { + w.write(TD); w.write(TDx); // controller w.write(TD); w.write("total"); // bopId w.write(TDx); @@ -1035,12 +1040,12 @@ w.write(TD); w.write(cdata(bop.getEvaluationContext().toString())); w.write(TDx); + } + if (detailedStats) { w.write(TD); w.write(cdata(bop.getProperty(BOp.Annotations.CONTROLLER, - BOp.Annotations.DEFAULT_CONTROLLER).toString())); + BOp.Annotations.DEFAULT_CONTROLLER).toString())); w.write(TDx); - } - if (detailedStats) { w.write(TD); w.write(Integer.toString(bopId)); w.write(TDx); @@ -1074,6 +1079,7 @@ } w.write(TDx); + // summary w.write(TD); if (pred != null) { w.write(cdata(pred.getClass().getSimpleName())); @@ -1153,7 +1159,18 @@ .getProperty(ChunkedMaterializationOp.Annotations.VARS); w.write(cdata(Arrays.toString(vars))); } - w.write(TDx); + if (bop instanceof JoinGraph) { + final Path p = ((JoinGraph) bop).getPath(q); + final Map<PathIds, EdgeSample> samples = ((JoinGraph) bop) + .getSamples(q); + if (p != null && samples != null) { + // Show the RTO discovered join path. + w.write("<pre>"); + w.write(cdata(JGraph.showPath(p, samples))); + w.write("</pre>"); + } + } + w.write(TDx); // end summary /* * Static optimizer metadata. 
@@ -1432,13 +1449,13 @@ } - private static String cdata(String s) { + private static String cdata(final String s) { return XHTMLRenderer.cdata(s); } - private static String attrib(String s) { + private static String attrib(final String s) { return XHTMLRenderer.attrib(s); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2013-12-30 12:27:34 UTC (rev 7695) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2013-12-30 13:41:38 UTC (rev 7696) @@ -234,6 +234,7 @@ return Collections.unmodifiableList(Arrays.asList(V)); } + @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("JoinGraph"); @@ -354,19 +355,88 @@ * @throws IllegalArgumentException * if <i>nedges</i> is non-positive. * @throws Exception + */ + public Path runtimeOptimizer(final QueryEngine queryEngine, + final int limit, final int nedges) throws NoSolutionsException, + Exception { + + /* + * This map is used to associate join path segments (expressed as an + * ordered array of bopIds) with edge sample to avoid redundant effort. + * + * FIXME RTO: HEAP MANAGMENT : This map holds references to the cutoff + * join samples. To ensure that the map has the minimum heap footprint, + * it must be scanned each time we prune the set of active paths and any + * entry which is not a prefix of an active path should be removed. + * + * TODO RTO: MEMORY MANAGER : When an entry is cleared from this map, + * the corresponding allocation in the memory manager (if any) must be + * released. The life cycle of the map needs to be bracketed by a + * try/finally in order to ensure that all allocations associated with + * the map are released no later than when we leave the lexicon scope of + * that clause. + */ + final Map<PathIds, EdgeSample> edgeSamples = new LinkedHashMap<PathIds, EdgeSample>(); + + return runtimeOptimizer(queryEngine, limit, nedges, edgeSamples); + + } + + /** + * Find a good join path in the data given the join graph. The join path is + * not guaranteed to be the best join path (the search performed by the + * runtime optimizer is not exhaustive) but it should always be a "good" + * join path and may often be the "best" join path. * - * @todo It is possible that this could throw a {@link NoSolutionsException} - * if the cutoff joins do not use a large enough sample to find a join - * path which produces at least one solution (except that no solutions - * for an optional join do not cause the total to fail, nor do no - * solutions for some part of a UNION). + * @param queryEngine + * The query engine. + * @param limit + * The limit for sampling a vertex and the initial limit for + * cutoff join evaluation. + * @param nedges + * The edges in the join graph are sorted in order of increasing + * cardinality and up to <i>nedges</i> of the edges having the + * lowest cardinality are used to form the initial set of join + * paths. For each edge selected to form a join path, the + * starting vertex will be the vertex of that edge having the + * lower cardinality. + * @param sampleType + * Type safe enumeration indicating the algorithm which will be + * used to sample the initial vertices. + * @param edgeSamples + * A map that will be populated with the samples associated with + * each non-pruned join path. 
This map is used to associate join + * path segments (expressed as an ordered array of bopIds) with + * edge sample to avoid redundant effort. * - * TODO We need to automatically increase the depth of search for - * queries where we have cardinality estimation underflows or punt to - * another method to decide the join order. + * @return The join path identified by the runtime query optimizer as the + * best path given the join graph and the data. + * + * @throws NoSolutionsException + * If there are no solutions for the join graph in the data (the + * query does not have any results). + * @throws IllegalArgumentException + * if <i>queryEngine</i> is <code>null</code>. + * @throws IllegalArgumentException + * if <i>limit</i> is non-positive. + * @throws IllegalArgumentException + * if <i>nedges</i> is non-positive. + * @throws Exception + * + * TODO It is possible that this could throw a + * {@link NoSolutionsException} if the cutoff joins do not use a + * large enough sample to find a join path which produces at + * least one solution (except that no solutions for an optional + * join do not cause the total to fail, nor do no solutions for + * some part of a UNION). + * + * TODO We need to automatically increase the depth of search + * for queries where we have cardinality estimation underflows + * or punt to another method to decide the join order. */ public Path runtimeOptimizer(final QueryEngine queryEngine, - final int limit, final int nedges) + final int limit, final int nedges, + final Map<PathIds, EdgeSample> edgeSamples) throws Exception, NoSolutionsException { if (queryEngine == null) @@ -375,6 +445,8 @@ throw new IllegalArgumentException(); if (nedges <= 0) throw new IllegalArgumentException(); + if (edgeSamples == null) + throw new IllegalArgumentException(); // Setup the join graph. Path[] paths = round0(queryEngine, limit, nedges); @@ -396,24 +468,6 @@ int round = 1; - /* - * This map is used to associate join path segments (expressed as an - * ordered array of bopIds) with edge sample to avoid redundant effort. - * - * FIXME HEAP MANAGMENT : This map holds references to the cutoff join - * samples. To ensure that the map has the minimum heap footprint, it - * must be scanned each time we prune the set of active paths and any - * entry which is not a prefix of an active path should be removed. - * - * TODO MEMORY MANAGER : When an entry is cleared from this map, the - * corresponding allocation in the memory manager (if any) must be - * released. The life cycle of the map needs to be bracketed by a - * try/finally in order to ensure that all allocations associated with - * the map are released no later than when we leave the lexicon scope of - * that clause. - */ - final Map<PathIds, EdgeSample> edgeSamples = new LinkedHashMap<PathIds, EdgeSample>(); - while (paths.length > 0 && round < nvertices - 1) { /* @@ -1027,6 +1081,7 @@ continue; } + // FIXME RTO: Replace with StaticAnalysis. if (!PartitionedJoinGroup.canJoinUsingConstraints(// x.getPredicates(),// path tVertex.pred,// vertex @@ -1616,7 +1671,8 @@ * @param edgeSamples * A map containing the samples utilized by the {@link Path}. 
*/ - static String showPath(final Path x, final Map<PathIds, EdgeSample> edgeSamples) { + static public String showPath(final Path x, + final Map<PathIds, EdgeSample> edgeSamples) { if (x == null) throw new IllegalArgumentException(); final StringBuilder sb = new StringBuilder(); @@ -1672,20 +1728,20 @@ predId,// NA, "", NA, NA, NA, NA, NA, NA, NA, NA, NA, "", NA, NA);//,NA,NA); } else if(sample instanceof VertexSample) { - /* - * Show the vertex sample for the initial vertex. - * - * Note: we do not store all fields for a vertex sample - * which are stored for an edge sample because so many of - * the values are redundant for a vertex sample. Therefore, - * this sets up local variables which are equivalent to the - * various edge sample columns that we will display. - */ - final long sumRangeCount = sample.estCard; - final long estRead = sample.estCard; - final long tuplesRead = Math.min(sample.estCard, sample.limit); - final long outputCount = Math.min(sample.estCard, sample.limit); - final long adjCard = Math.min(sample.estCard, sample.limit); + /* + * Show the vertex sample for the initial vertex. + * + * Note: we do not store all fields for a vertex sample + * which are stored for an edge sample because so many of + * the values are redundant for a vertex sample. Therefore, + * this sets up local variables which are equivalent to the + * various edge sample columns that we will display. + */ + final long sumRangeCount = sample.estCard; + final long estRead = sample.estCard; + final long tuplesRead = Math.min(sample.estCard, sample.limit); + final long outputCount = Math.min(sample.estCard, sample.limit); + final long adjCard = Math.min(sample.estCard, sample.limit); f.format("% 4d %10s%1s * %10s (%8s %8s %8s %8s %8s %8s) = % 10d % 10d%1s : %10d %10d",// %10d %10s",// predId,// " ",//srcSample.estCard Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2013-12-30 12:27:34 UTC (rev 7695) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2013-12-30 13:41:38 UTC (rev 7696) @@ -27,6 +27,7 @@ package com.bigdata.bop.joinGraph.rto; +import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.FutureTask; @@ -132,9 +133,32 @@ String SAMPLE_TYPE = JoinGraph.class.getName() + ".sampleType"; String DEFAULT_SAMPLE_TYPE = SampleType.RANDOM.name(); - + } + /** + * Query attribute names for the {@link JoinGraph}. The fully qualified name + * of the attribute is formed by appending the attribute name to the + * "bopId-", where <code>bopId</code> is the value returned by + * {@link BOp#getId()} + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + public interface Attributes { + + /** + * The join path selected by the RTO (output). + */ + String PATH = JoinGraph.class.getName() + ".path"; + + /** + * The samples associated with join path selected by the RTO (output). + */ + String SAMPLES = JoinGraph.class.getName() + ".samples"; + + } + /** * @see Annotations#SELECTED */ @@ -189,7 +213,55 @@ Annotations.DEFAULT_SAMPLE_TYPE)); } - + + /** + * Return the computed join path. 
+ * + * @see Attributes#PATH + */ + public Path getPath(final IRunningQuery q) { + + return (Path) q.getAttributes().get(getId() + "-" + Attributes.PATH); + + } + + /** + * Return the samples associated with the computed join path. + * + * @see Annotations#SAMPLES + */ + @SuppressWarnings("unchecked") + public Map<PathIds, EdgeSample> getSamples(final IRunningQuery q) { + + return (Map<PathIds, EdgeSample>) q.getAttributes().get( + getId() + "-" + Attributes.SAMPLES); + + } + + private void setPath(final IRunningQuery q, final Path p) { + + q.getAttributes().put(getId() + "-" + Attributes.PATH, p); + + } + + private void setSamples(final IRunningQuery q, + final Map<PathIds, EdgeSample> samples) { + + q.getAttributes().put(getId() + "-" + Attributes.SAMPLES, samples); + + } + + /** + * Deep copy constructor. + * + * @param op + */ + public JoinGraph(final JoinGraph op) { + + super(op); + + } + public JoinGraph(final BOp[] args, final NV... anns) { this(args, NV.asMap(anns)); @@ -257,11 +329,11 @@ // private final JGraph g; - final private int limit; - - final private int nedges; - - private final SampleType sampleType; +// final private int limit; +// +// final private int nedges; +// +// final private SampleType sampleType; JoinGraphTask(final BOpContext<IBindingSet> context) { @@ -270,13 +342,13 @@ this.context = context; - // The initial cutoff sampling limit. - limit = getLimit(); - - // The initial number of edges (1 step paths) to explore. - nedges = getNEdges(); - - sampleType = getSampleType(); +// // The initial cutoff sampling limit. +// limit = getLimit(); +// +// // The initial number of edges (1 step paths) to explore. +// nedges = getNEdges(); +// +// sampleType = getSampleType(); // if (limit <= 0) // throw new IllegalArgumentException(); @@ -303,14 +375,38 @@ final long begin = System.nanoTime(); - // Create the join graph. + // Create the join graph. final JGraph g = new JGraph(getVertices(), getConstraints(), - sampleType); + getSampleType()); - // Find the best join path. - final Path p = g.runtimeOptimizer(context.getRunningQuery() - .getQueryEngine(), limit, nedges); + /* + * This map is used to associate join path segments (expressed as an + * ordered array of bopIds) with edge sample to avoid redundant effort. + * + * FIXME RTO: HEAP MANAGMENT : This map holds references to the cutoff + * join samples. To ensure that the map has the minimum heap footprint, + * it must be scanned each time we prune the set of active paths and any + * entry which is not a prefix of an active path should be removed. + * + * TODO RTO: MEMORY MANAGER : When an entry is cleared from this map, + * the corresponding allocation in the memory manager (if any) must be + * released. The life cycle of the map needs to be bracketed by a + * try/finally in order to ensure that all allocations associated with + * the map are released no later than when we leave the lexicon scope of + * that clause. + */ + final Map<PathIds, EdgeSample> edgeSamples = new LinkedHashMap<PathIds, EdgeSample>(); + // Find the best join path. + final Path p = g.runtimeOptimizer(context.getRunningQuery() + .getQueryEngine(), getLimit(), getNEdges(), edgeSamples); + + // Set attribute for the join path result. + setPath(context.getRunningQuery(), p); + + // Set attribute for the join path samples. 
+ setSamples(context.getRunningQuery(), edgeSamples); + final long mark = System.nanoTime(); final long elapsed_queryOptimizer = mark - begin; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Path.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Path.java 2013-12-30 12:27:34 UTC (rev 7695) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Path.java 2013-12-30 13:41:38 UTC (rev 7696) @@ -181,6 +181,7 @@ } + @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("Path{["); @@ -751,7 +752,7 @@ if (sourceSample.getSample() == null) throw new IllegalArgumentException(); - // Figure out which constraints attach to each predicate. + // Figure out which constraints attach to each predicate. FIXME RTO Replace with StaticAnalysis. final IConstraint[][] constraintAttachmentArray = PartitionedJoinGroup .getJoinGraphConstraints(path, constraints, null/*knownBound*/, pathIsComplete); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/SampleBase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/SampleBase.java 2013-12-30 12:27:34 UTC (rev 7695) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/SampleBase.java 2013-12-30 13:41:38 UTC (rev 7696) @@ -118,7 +118,7 @@ /** * Release the sampled solution set. * - * TODO MEMORY MANAGER : release. + * FIXME RTO : MEMORY MANAGER : release. */ void releaseSample() { @@ -183,6 +183,7 @@ // NOP } + @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append(getClass().getSimpleName()); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2013-12-30 12:27:34 UTC (rev 7695) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2013-12-30 13:41:38 UTC (rev 7696) @@ -50,9 +50,7 @@ import com.bigdata.rdf.sparql.ast.IGroupMemberNode; import com.bigdata.rdf.sparql.ast.JoinGroupNode; import com.bigdata.rdf.sparql.ast.QueryHints; -import com.bigdata.rdf.sparql.ast.QueryOptimizerEnum; import com.bigdata.rdf.sparql.ast.StatementPatternNode; -import com.bigdata.rdf.sparql.ast.StaticAnalysis; /** * Integration with the Runtime Optimizer (RTO). @@ -122,7 +120,10 @@ final StatementPatternNode sp = (StatementPatternNode) child; final boolean optional = sp.isOptional(); if(optional) { - // TODO Handle optional SPs in joinGraph. + /* + * TODO Handle optional SPs in joinGraph (by ordering them + * in the tail so as to minimize the cost function). + */ break; } @@ -165,8 +166,6 @@ // Something the RTO can handle. sps.add(sp); /* - * TODO Assign predId? - * * FIXME Handle Triples vs Quads, Default vs Named Graph, and * DataSet. This probably means pushing more logic down into * the RTO from AST2BOpJoins. @@ -237,7 +236,7 @@ new NV(BOp.Annotations.BOP_ID, ctx.nextId()),// new NV(BOp.Annotations.EVALUATION_CONTEXT, BOpEvaluationContext.CONTROLLER),// - new NV(BOp.Annotations.CONTROLLER, true),// TODO DROP the "CONTROLLER" annotation. The concept is not required. 
+ new NV(BOp.Annotations.CONTROLLER, true),// Drop "CONTROLLER" annotation? // new NV(PipelineOp.Annotations.MAX_PARALLEL, 1),// // new NV(PipelineOp.Annotations.LAST_PASS, true),// required new NV(JoinGraph.Annotations.SELECTED, selectVars
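The attribute plumbing added above means the RTO's decision can be read back off the running query once the JoinGraph operator has executed. A minimal sketch of doing so, assuming a JoinGraph instance from the executed plan and an IRunningQuery handle; the reporting method itself and the exact package locations of PathIds and EdgeSample are assumptions, not part of the commit:

{{{
import java.util.Map;

import com.bigdata.bop.engine.IRunningQuery;
import com.bigdata.bop.joinGraph.rto.EdgeSample;
import com.bigdata.bop.joinGraph.rto.JoinGraph;
import com.bigdata.bop.joinGraph.rto.Path;
import com.bigdata.bop.joinGraph.rto.PathIds;

public class RTOInspection {

    /**
     * Print the join path selected by the RTO together with the cutoff
     * join samples collected while exploring the join graph.
     */
    public static void report(final JoinGraph jg, final IRunningQuery q) {

        // The join ordering chosen by the runtime optimizer (null until set).
        final Path p = jg.getPath(q);

        // Cutoff join samples, keyed by the ordered bopIds of each explored
        // join path segment.
        final Map<PathIds, EdgeSample> samples = jg.getSamples(q);

        System.out.println("RTO path: " + p);

        if (samples != null) {
            for (Map.Entry<PathIds, EdgeSample> e : samples.entrySet()) {
                System.out.println(e.getKey() + " => " + e.getValue());
            }
        }

    }

}
}}}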
From: <tho...@us...> - 2013-12-30 12:27:41
Revision: 7695 http://bigdata.svn.sourceforge.net/bigdata/?rev=7695&view=rev Author: thompsonbry Date: 2013-12-30 12:27:34 +0000 (Mon, 30 Dec 2013) Log Message: ----------- more javadoc on bds:search. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java 2013-12-30 12:02:34 UTC (rev 7694) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java 2013-12-30 12:27:34 UTC (rev 7695) @@ -68,7 +68,6 @@ import com.bigdata.rdf.sparql.ast.service.ServiceFactory; import com.bigdata.rdf.sparql.ast.service.ServiceNode; import com.bigdata.rdf.spo.DistinctMultiTermAdvancer; -import com.bigdata.rdf.spo.DistinctTermAdvancer; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.spo.SPOKeyOrder; @@ -82,7 +81,7 @@ /** * A factory for a statement pattern slicing service. * It accepts a group with a single triple pattern in it: - * + * <pre> * service bd:slice { * ?s rdf:type ex:Foo . * * # required service params for the sample * # either offset+limit * bd:serviceParam bd:slice.offset 0 . * bd:serviceParam bd:slice.limit 2000 . * * # or range * bd:serviceParam bd:slice.range ?range * } - * + * </pre> * The service params are required and set the slicing parameters. You can * either request a slice or request a range count depending on the params. * The range count is useful when dealing with a "rangeSafe" predicate with Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java 2013-12-30 12:02:34 UTC (rev 7694) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java 2013-12-30 12:27:34 UTC (rev 7695) @@ -31,10 +31,19 @@ import org.openrdf.model.URI; import org.openrdf.model.impl.URIImpl; +import com.bigdata.rdf.sparql.ast.eval.SliceServiceFactory; + /** - * A vocabulary for bigdata specific extensions. + * A vocabulary for the bigdata full text search facility. Full text search may + * be used to combine text search and graph search. Low-latency, user-facing + * search applications may be created by slicing the full text search results + * and feeding them incrementally into SPARQL queries. This approach allows the + * application to manage the cost of the SPARQL query by bounding the input. If + * necessary, additional results can be fed into the query. * + * @see SliceServiceFactory + * * @see <a * href="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=FullTextSearch"> * Free Text Index </a> @@ -55,8 +64,8 @@ final String NAMESPACE = "http://www.bigdata.com/rdf/search#"; /** - * The name of a magic predicate recognized by the {@link com.bigdata.rdf.sail.BigdataSail} when - * it occurs in statement patterns such as: + * The name of a magic predicate recognized in SPARQL queries when it occurs + * in statement patterns such as: * * <pre> * * </pre> * - * The value MUST be bound and MUST be a literal. The languageCode attribute - * is permitted, but a datatype attribute is not allowed.
When specified, - * the languageCode attribute will be used to determine how the literal is - * tokenized - it does not filter for matches marked with that languageCode - * attribute. + * The value MUST be bound and MUST be a literal. The + * <code>languageCode</code> attribute is permitted. When specified, the + * <code>languageCode</code> attribute will be used to determine how the + * literal is tokenized - it does not filter for matches marked with that + * <code>languageCode</code> attribute. The <code>datatype</code> attribute + * is not allowed. * <p> - * The subject MUST NOT be bound. + * The subject MUST NOT be bound. * <p> * * This expression will evaluate to a set of bindings for the subject - * position corresponding to the indexed literals matching any of the - * terms obtained when the literal was tokenized. + * position corresponding to the indexed literals matching any of the terms + * obtained when the literal was tokenized. * * <p> * Note: The context position should be unbound when using statement * identifiers. */ -// URI SEARCH = new URIImpl(NAMESPACE+"search"); - final URI SEARCH = new URIImpl(NAMESPACE+"search"); + final URI SEARCH = new URIImpl(NAMESPACE + "search"); /** * Magic predicate used to query for free text search metadata, reporting @@ -100,17 +109,22 @@ * * </pre> * + * Relevance is the cosine of the angle between the query vector (search + * terms) and the document vector (terms in the indexed literals). The + * minimum relevance is ZERO (0.0). The maximum relevance is ONE (1.0). + * * @see #MIN_RELEVANCE * @see #MAX_RELEVANCE */ - final URI RELEVANCE = new URIImpl(NAMESPACE+"relevance"); + final URI RELEVANCE = new URIImpl(NAMESPACE + "relevance"); /** * Magic predicate used to query for free text search metadata, reporting * the rank (origin ONE (1)) of the search result among the search results * obtained for the search query. The rank is from ONE to N, where N is the - * number of search results from the full text index. Use in conjunction - * with {@link #SEARCH} as follows: + * number of search results from the full text index. {@link #MIN_RANK} and + * {@link #MAX_RANK} may be used to "slice" the full text index search + * results. Use this query hint in conjunction with {@link #SEARCH} as follows: * <p> * * <pre> * @@ -126,7 +140,7 @@ * @see #MIN_RANK * @see #MAX_RANK */ - final URI RANK = new URIImpl(NAMESPACE+"rank"); + final URI RANK = new URIImpl(NAMESPACE + "rank"); /** * Magic predicate used to limit the maximum rank of the free text search @@ -152,7 +166,7 @@ * feed the next "page" of free text results by changing the values for the * {@link #MIN_RANK} and {@link #MAX_RANK} query hints. */ - final URI MAX_RANK = new URIImpl(NAMESPACE+"maxRank"); + final URI MAX_RANK = new URIImpl(NAMESPACE + "maxRank"); /** * The default for {@link #MAX_RANK}. @@ -177,7 +191,7 @@ * * The default is {@value #DEFAULT_MIN_RANK}. */ - final URI MIN_RANK = new URIImpl(NAMESPACE+"minRank"); + final URI MIN_RANK = new URIImpl(NAMESPACE + "minRank"); /** * The default for {@link #MIN_RANK} is 1, full text search results will */ final int DEFAULT_MIN_RANK = 1; - /** - * Magic predicate used to query for free text search metadata. Use in - * conjunction with {@link #SEARCH} as follows: - * <p> - * - * <pre> - * - * select ?s - * where { - * ?s bd:search "scale-out RDF triplestore" . - * ?s bd:minRelevance "0.5"^^xsd:double . - * } - * - * </pre> - * - * The relevance scores are in [0.0:1.0].
You should NOT specify a minimum - * relevance of ZERO (0.0) as this can drag in way too many unrelated - * results. The default is {@value #DEFAULT_MIN_RELEVANCE}. - */ - final URI MIN_RELEVANCE = new URIImpl(NAMESPACE+"minRelevance"); + /** + * Magic predicate used to query for free text search metadata. Use in + * conjunction with {@link #SEARCH} as follows: + * <p> + * + * <pre> + * + * select ?s + * where { + * ?s bd:search "scale-out RDF triplestore" . + * ?s bd:minRelevance "0.5"^^xsd:double . + * } + * + * </pre> + * + * The relevance scores are in [0.0:1.0], where 0.0 is the minimum possible + * relevance and 1.0 is the maximum possible relevance. You should NOT + * specify a minimum relevance of ZERO (0.0) as this can drag in way too + * many unrelated results. The default is {@value #DEFAULT_MIN_RELEVANCE}. + */ + final URI MIN_RELEVANCE = new URIImpl(NAMESPACE + "minRelevance"); final double DEFAULT_MIN_RELEVANCE = 0.0d; - /** - * Magic predicate used to query for free text search metadata. Use in - * conjunction with {@link #SEARCH} as follows: - * <p> - * - * <pre> - * - * select ?s - * where { - * ?s bd:search "scale-out RDF triplestore" . - * ?s bd:maxRelevance "0.9"^^xsd:double . - * } - * - * </pre> - * - * The relevance scores are in [0.0:1.0]. The default maximum relevance is - * {@value #DEFAULT_MAX_RELEVANCE}. - */ - final URI MAX_RELEVANCE = new URIImpl(NAMESPACE+"maxRelevance"); + /** + * Magic predicate used to query for free text search metadata. Use in + * conjunction with {@link #SEARCH} as follows: + * <p> + * + * <pre> + * + * select ?s + * where { + * ?s bd:search "scale-out RDF triplestore" . + * ?s bd:maxRelevance "0.9"^^xsd:double . + * } + * + * </pre> + * + * The relevance scores are in [0.0:1.0], where 0.0 is the minimum possible + * relevance and 1.0 is the maximum possible relevance. The default maximum + * relevance is {@value #DEFAULT_MAX_RELEVANCE}. + */ + final URI MAX_RELEVANCE = new URIImpl(NAMESPACE + "maxRelevance"); /** - * The default value for {@link #MAX_RELEVANCE} unless overridden. + * The default value for {@link #MAX_RELEVANCE} unless overridden. */ final double DEFAULT_MAX_RELEVANCE = 1.0d; /** - * Magic predicate used to query for free text search metadata. Use - * in conjunction with {@link #SEARCH} as follows: + * Magic predicate used to query for free text search metadata, indicating + * that all terms in the query must be found within a given literal in order + * for that literal to "match" the query (default + * {@value #DEFAULT_MATCH_ALL_TERMS}). Use in conjunction with + * {@link #SEARCH} as follows: * <p> + * * <pre> * * select ?s * where { * ?s bd:search "scale-out RDF triplestore" . * ?s bd:matchAllTerms "true" . * } * * </pre> */ - final URI MATCH_ALL_TERMS = new URIImpl(NAMESPACE+"matchAllTerms"); + final URI MATCH_ALL_TERMS = new URIImpl(NAMESPACE + "matchAllTerms"); final boolean DEFAULT_MATCH_ALL_TERMS = false; /** - * Magic predicate used to query for free text search metadata. Use - * in conjunction with {@link #SEARCH} as follows: + * Magic predicate used to query for free text search metadata, indicating + * that only exact string matches will be reported (the literal must contain + * the search string). Use in conjunction with {@link #SEARCH} as follows: * <p> + * * <pre> * * select ?s * where { * ?s bd:search "scale-out RDF triplestore" . * ?s bd:matchExact "true" . * } * * </pre> * <p> * This operation will be rather expensive as it will require materializing * all the hits to check their values.
*/ - final URI MATCH_EXACT = new URIImpl(NAMESPACE+"matchExact"); + final URI MATCH_EXACT = new URIImpl(NAMESPACE + "matchExact"); final boolean DEFAULT_MATCH_EXACT = false; /** - * Magic predicate used to query for free text search metadata. Use - * in conjunction with {@link #SEARCH} as follows: + * Magic predicate used to query for free text search metadata, indicating + * that only search results that also pass the specified REGEX filter will + * be reported. Use in conjunction with {@link #SEARCH} as follows: * <p> + * * <pre> * * select ?s * where { * ?s bd:search "scale-out RDF triplestore" . * ?s bd:matchRegex ?regex . * } * * </pre> * <p> * This operation will be rather expensive as it will require materializing * all the hits to check their values. */ - final URI MATCH_REGEX = new URIImpl(NAMESPACE+"matchRegex"); + final URI MATCH_REGEX = new URIImpl(NAMESPACE + "matchRegex"); final String DEFAULT_MATCH_REGEX = null; /** - * Magic predicate used to query for free text search metadata. Use - * in conjunction with {@link #SEARCH} as follows: + * Magic predicate used to query for free text search metadata to turn on + * prefix matching. Prefix matching will match all full text index tokens + * that begin with the specified token(s) (default + * {@value #DEFAULT_PREFIX_MATCH}). Use in conjunction with {@link #SEARCH} + * as follows: * <p> + * * <pre> * * select ?s * where { * ?s bd:search "scale-" . * ?s bd:prefixMatch "true" . * } * * </pre> * <p> * This will turn on prefix matching. */ - final URI PREFIX_MATCH = new URIImpl(NAMESPACE+"prefixMatch"); + final URI PREFIX_MATCH = new URIImpl(NAMESPACE + "prefixMatch"); final boolean DEFAULT_PREFIX_MATCH = false; @@ -330,20 +360,23 @@ * The subject-centric search index must be enabled via * {@link AbstractTripleStore.Options#SUBJECT_CENTRIC_TEXT_INDEX}. */ - final URI SUBJECT_SEARCH = new URIImpl(NAMESPACE+"subjectSearch"); + final URI SUBJECT_SEARCH = new URIImpl(NAMESPACE + "subjectSearch"); final boolean DEFAULT_SUBJECT_SEARCH = false; /** - * Magic predicate used for the "search in search" service. Also serves - * as the identifier for the service itself. + * Magic predicate used for the "search in search" service. Also serves as + * the identifier for the service itself. */ - final URI SEARCH_IN_SEARCH = new URIImpl(NAMESPACE+"searchInSearch"); - + final URI SEARCH_IN_SEARCH = new URIImpl(NAMESPACE + "searchInSearch"); + /** - * Magic predicate used to query for free text search metadata. Use - * in conjunction with {@link #SEARCH} as follows: + * Magic predicate used to query for free text search metadata to set a + * deadline in milliseconds on the full text index search (default + * {@value #DEFAULT_TIMEOUT}). Use in conjunction with {@link #SEARCH} as + * follows: * <p> + * * <pre> * * select ?s * where { * ?s bd:search "scale-out RDF triplestore" . * ?s bd:searchTimeout "5000" . * } * * </pre> * <p> * Timeout specified in milliseconds. */ - final URI SEARCH_TIMEOUT = new URIImpl(NAMESPACE+"searchTimeout"); + final URI SEARCH_TIMEOUT = new URIImpl(NAMESPACE + "searchTimeout"); + /** * The default timeout for a free text search (milliseconds). */ final long DEFAULT_TIMEOUT = Long.MAX_VALUE; - + }
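Taken together, the magic predicates documented in this commit compose into a single statement pattern group. A hedged illustration of how an application might assemble such a query as a string; the prefix bindings and the particular constants chosen here are assumptions layered on the NAMESPACE documented above, not code from the commit:

{{{
import com.bigdata.rdf.store.BDS;

public class SearchQueryExample {

    /**
     * Build a SPARQL string that slices a free text search to the first
     * ten hits and also binds the relevance score for each hit.
     */
    public static String buildQuery(final String searchText) {
        return "PREFIX bds: <" + BDS.NAMESPACE + ">\n"
             + "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n"
             + "SELECT ?s ?relevance WHERE {\n"
             + "  ?s bds:search \"" + searchText + "\" .\n"
             + "  ?s bds:relevance ?relevance .\n"
             + "  ?s bds:minRelevance \"0.25\"^^xsd:double .\n" // avoid 0.0 (see above)
             + "  ?s bds:minRank \"1\"^^xsd:int .\n"            // slice: first hit
             + "  ?s bds:maxRank \"10\"^^xsd:int .\n"           // slice: tenth hit
             + "}";
    }

    public static void main(final String[] args) {
        System.out.println(buildQuery("scale-out RDF triplestore"));
    }

}
}}}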
From: <tho...@us...> - 2013-12-30 12:02:43
Revision: 7694 http://bigdata.svn.sourceforge.net/bigdata/?rev=7694&view=rev Author: thompsonbry Date: 2013-12-30 12:02:34 +0000 (Mon, 30 Dec 2013) Log Message: ----------- Added "DENSE" as a SampleType for SampleIndex class to support the RTO. DENSE simply takes N keys from the head of the key range for the access path. This significantly reduces the IO latency associated with either random or uniform sampling since we will typically touch only one or two leaves while random or uniform sampling could easily touch all leaves spanned by the key range. Of course, this head sampling introduces a bias. Added RTO query hints for nedges, limit, and sample type. Added log @ INFO for the RTO overhead and the execution time for the optimized query. See #64 (RTO). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/SampleIndex.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/QueryHintRegistry.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOLimitQueryHint.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTONEdgesQueryHint.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOSampleTypeQueryHint.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/SampleIndex.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/SampleIndex.java 2013-12-24 13:23:21 UTC (rev 7693) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/SampleIndex.java 2013-12-30 12:02:34 UTC (rev 7694) @@ -106,7 +106,13 @@ /** * Sample offsets are computed randomly. */ - RANDOM; + RANDOM, + /** + * The samples will be dense and may have a front bias. This mode + * emphasizes the locality of the samples on the index pages and + * minimizes the IO associated with sampling. + */ + DENSE; } /** @@ -323,6 +329,9 @@ seed(), limit, accessPath.getFromKey(), accessPath .getToKey()); break; + case DENSE: + advancer = new DenseSampleAdvancer<E>(); + break; default: throw new UnsupportedOperationException("SampleType=" + sampleType); @@ -339,6 +348,23 @@ } /** + * Dense samples in key order (simple index scan). + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @param <E> + */ + private static class DenseSampleAdvancer<E> extends Advancer<E> { + + private static final long serialVersionUID = 1L; + + @Override + protected void advance(final ITuple<E> tuple) { + // NOP + } + + } + + /** * An advancer pattern which is designed to take evenly distributed samples * from an index. The caller specifies the #of tuples to be sampled.
This * class estimates the range count of the access path and then computes the Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2013-12-24 13:23:21 UTC (rev 7693) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2013-12-30 12:02:34 UTC (rev 7694) @@ -30,7 +30,10 @@ import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.FutureTask; +import java.util.concurrent.TimeUnit; +import org.apache.log4j.Logger; + import com.bigdata.bop.BOp; import com.bigdata.bop.BOpContext; import com.bigdata.bop.BOpIdFactory; @@ -107,11 +110,16 @@ int DEFAULT_LIMIT = 100; - /** - * The <i>nedges</i> edges of the join graph having the lowest - * cardinality will be used to generate the initial join paths (default - * {@value #DEFAULT_NEDGES}). This must be a positive integer. - */ + /** + * The <i>nedges</i> edges of the join graph having the lowest + * cardinality will be used to generate the initial join paths (default + * {@value #DEFAULT_NEDGES}). This must be a positive integer. The edges + * in the join graph are sorted in order of increasing cardinality and + * up to <i>nedges</i> of those edges having the lowest cardinality are + * used to form the initial set of join paths. For each edge selected to + * form a join path, the starting vertex will be the vertex of that edge + * having the lower cardinality. + */ String NEDGES = JoinGraph.class.getName() + ".nedges"; int DEFAULT_NEDGES = 2; @@ -292,7 +300,9 @@ */ @Override public Void call() throws Exception { - + + final long begin = System.nanoTime(); + // Create the join graph. final JGraph g = new JGraph(getVertices(), getConstraints(), sampleType); @@ -301,6 +311,10 @@ final Path p = g.runtimeOptimizer(context.getRunningQuery() .getQueryEngine(), limit, nedges); + final long mark = System.nanoTime(); + + final long elapsed_queryOptimizer = mark - begin; + // Factory avoids reuse of bopIds assigned to the predicates. final BOpIdFactory idFactory = new BOpIdFactory(); @@ -313,11 +327,20 @@ // Run the query, blocking until it is done. JoinGraph.runSubquery(context, queryOp); + final long elapsed_queryExecution = System.nanoTime() - mark; + + if (log.isInfoEnabled()) + log.info("RTO: queryOptimizer=" + + TimeUnit.NANOSECONDS.toMillis(elapsed_queryOptimizer) + + ", queryExecution=" + + TimeUnit.NANOSECONDS.toMillis(elapsed_queryExecution)); + return null; } } // class JoinGraphTask + private static final transient Logger log = Logger.getLogger(JGraph.class); /** * Execute the selected join path. 
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java 2013-12-24 13:23:21 UTC (rev 7693) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java 2013-12-30 12:02:34 UTC (rev 7694) @@ -32,6 +32,7 @@ import com.bigdata.bop.BufferAnnotations; import com.bigdata.bop.IPredicate.Annotations; import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.ap.SampleIndex.SampleType; import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.fed.QueryEngineFactory; @@ -90,6 +91,46 @@ QueryOptimizerEnum DEFAULT_OPTIMIZER = QueryOptimizerEnum.Static; /** + * The sampling bias for the runtime query optimizer. Dense sampling + * maximizes index locality but reduces robustness to correlations that do + * not exist in the head of the access path key range. Random sampling + * maximizes robustness, but pays a heavy IO cost. Even sampling also + * increases robustness, but will visit every Nth tuple and pay a heavy IO + * cost as a result. Thus dense sampling should be much faster, but random or + * even sampling should detect bias that might not otherwise be exposed to + * the runtime query optimizer. + * + * @see SampleType + */ + String RTO_SAMPLE_TYPE = "RTO-sampleType"; + + SampleType DEFAULT_RTO_SAMPLE_TYPE = SampleType.DENSE; + + /** + * The limit for sampling a vertex and the initial limit for cutoff join + * evaluation (default {@value #DEFAULT_RTO_LIMIT}). A larger limit and a + * random sample will provide a more accurate estimate of the cost of the + * join paths but increase the runtime overhead of the RTO optimizer. + */ + String RTO_LIMIT = "RTO-limit"; + + int DEFAULT_RTO_LIMIT = 20; + + /** + * The <i>nedges</i> edges of the join graph having the lowest cardinality + * will be used to generate the initial join paths (default + * {@value #DEFAULT_NEDGES}). This must be a positive integer. The edges in + * the join graph are sorted in order of increasing cardinality and up to + * <i>nedges</i> of those edges having the lowest cardinality are used to + * form the initial set of join paths. For each edge selected to form a join + * path, the starting vertex will be the vertex of that edge having the + * lower cardinality. + */ + String RTO_NEDGES = "RTO-nedges"; + + int DEFAULT_RTO_NEDGES = 2; + + /** * Query hint sets the optimistic threshold for the static join order * optimizer.
*/ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2013-12-24 13:23:21 UTC (rev 7693) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2013-12-30 12:02:34 UTC (rev 7694) @@ -42,12 +42,15 @@ import com.bigdata.bop.NV; import com.bigdata.bop.PipelineOp; import com.bigdata.bop.ap.Predicate; +import com.bigdata.bop.ap.SampleIndex.SampleType; import com.bigdata.bop.joinGraph.rto.JGraph; import com.bigdata.bop.joinGraph.rto.JoinGraph; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.internal.constraints.INeedsMaterialization; import com.bigdata.rdf.sparql.ast.IGroupMemberNode; import com.bigdata.rdf.sparql.ast.JoinGroupNode; +import com.bigdata.rdf.sparql.ast.QueryHints; +import com.bigdata.rdf.sparql.ast.QueryOptimizerEnum; import com.bigdata.rdf.sparql.ast.StatementPatternNode; import com.bigdata.rdf.sparql.ast.StaticAnalysis; @@ -228,6 +231,8 @@ * (unless we are going to run the RTO "bottom up") and build a hash * index. When the hash index is ready, we can execute the join group. */ + final SampleType sampleType = joinGroup.getProperty( + QueryHints.RTO_SAMPLE_TYPE, QueryHints.DEFAULT_RTO_SAMPLE_TYPE); left = new JoinGraph(leftOrEmpty(left),// new NV(BOp.Annotations.BOP_ID, ctx.nextId()),// new NV(BOp.Annotations.EVALUATION_CONTEXT, @@ -245,8 +250,7 @@ JoinGraph.Annotations.DEFAULT_LIMIT),// new NV(JoinGraph.Annotations.NEDGES, JoinGraph.Annotations.DEFAULT_NEDGES),// - new NV(JoinGraph.Annotations.SAMPLE_TYPE, - JoinGraph.Annotations.DEFAULT_SAMPLE_TYPE)// + new NV(JoinGraph.Annotations.SAMPLE_TYPE, sampleType.name())// ); // These joins were consumed. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/QueryHintRegistry.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/QueryHintRegistry.java 2013-12-24 13:23:21 UTC (rev 7693) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/QueryHintRegistry.java 2013-12-30 12:02:34 UTC (rev 7694) @@ -100,6 +100,9 @@ add(new RunLastHint()); add(new RunOnceHint()); add(new OptimizerQueryHint()); + add(new RTOSampleTypeQueryHint()); + add(new RTOLimitQueryHint()); + add(new RTONEdgesQueryHint()); add(new OptimisticQueryHint()); add(new AnalyticQueryHint()); Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOLimitQueryHint.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOLimitQueryHint.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOLimitQueryHint.java 2013-12-30 12:02:34 UTC (rev 7694) @@ -0,0 +1,79 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Nov 27, 2011 + */ + +package com.bigdata.rdf.sparql.ast.hints; + +import com.bigdata.bop.joinGraph.rto.JGraph; +import com.bigdata.rdf.sparql.ast.ASTBase; +import com.bigdata.rdf.sparql.ast.JoinGroupNode; +import com.bigdata.rdf.sparql.ast.QueryHints; +import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext; + +/** + * The query hint governing the initial sample size for the RTO optimizer. + * + * @see JGraph + * @see QueryHints#RTO_LIMIT + */ +final class RTOLimitQueryHint extends AbstractIntQueryHint { + + public RTOLimitQueryHint() { + super(QueryHints.RTO_LIMIT, QueryHints.DEFAULT_RTO_LIMIT); + } + + @Override + public Integer validate(final String value) { + + final int i = Integer.valueOf(value); + + if (i <= 0) + throw new IllegalArgumentException("Must be positive: hint=" + + getName() + ", value=" + value); + + return i; + + } + + @Override + public void handle(final AST2BOpContext ctx, final QueryHintScope scope, + final ASTBase op, final Integer value) { + + switch (scope) { + case Group: + case GroupAndSubGroups: + case Query: + case SubQuery: + if (op instanceof JoinGroupNode) { + _setAnnotation(ctx, scope, op, getName(), value); + } + return; + } + throw new QueryHintException(scope, op, getName(), value); + + } + +} \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTONEdgesQueryHint.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTONEdgesQueryHint.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTONEdgesQueryHint.java 2013-12-30 12:02:34 UTC (rev 7694) @@ -0,0 +1,80 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Nov 27, 2011 + */ + +package com.bigdata.rdf.sparql.ast.hints; + +import com.bigdata.bop.joinGraph.rto.JGraph; +import com.bigdata.rdf.sparql.ast.ASTBase; +import com.bigdata.rdf.sparql.ast.JoinGroupNode; +import com.bigdata.rdf.sparql.ast.QueryHints; +import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext; + +/** + * The query hint governing the choice of the number of initial edges for the + * exploration of join paths in the join graph. 
+ * + * @see JGraph + * @see QueryHints#RTO_NEDGES + */ +final class RTONEdgesQueryHint extends AbstractIntQueryHint { + + public RTONEdgesQueryHint() { + super(QueryHints.RTO_NEDGES, QueryHints.DEFAULT_RTO_NEDGES); + } + + @Override + public Integer validate(final String value) { + + int i = Integer.valueOf(value); + + if (i <= 0) + throw new IllegalArgumentException("Must be positive: hint=" + + getName() + ", value=" + value); + + return i; + + } + + @Override + public void handle(final AST2BOpContext ctx, final QueryHintScope scope, + final ASTBase op, final Integer value) { + + switch (scope) { + case Group: + case GroupAndSubGroups: + case Query: + case SubQuery: + if (op instanceof JoinGroupNode) { + _setAnnotation(ctx, scope, op, getName(), value); + } + return; + } + throw new QueryHintException(scope, op, getName(), value); + + } + +} \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOSampleTypeQueryHint.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOSampleTypeQueryHint.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOSampleTypeQueryHint.java 2013-12-30 12:02:34 UTC (rev 7694) @@ -0,0 +1,76 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Nov 27, 2011 + */ + +package com.bigdata.rdf.sparql.ast.hints; + +import com.bigdata.bop.ap.SampleIndex; +import com.bigdata.bop.ap.SampleIndex.SampleType; +import com.bigdata.rdf.sparql.ast.ASTBase; +import com.bigdata.rdf.sparql.ast.JoinGroupNode; +import com.bigdata.rdf.sparql.ast.QueryHints; +import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext; + +/** + * The query hint governing the choice of the sampling bias for the RTO + * optimizer.
+ * + * @see JGraph + * @see SampleType + * @see QueryHints#RTO_SAMPLE_TYPE + */ +final class RTOSampleTypeQueryHint extends AbstractQueryHint<SampleType> { + + public RTOSampleTypeQueryHint() { + super(QueryHints.RTO_SAMPLE_TYPE, QueryHints.DEFAULT_RTO_SAMPLE_TYPE); + } + + @Override + public SampleType validate(final String value) { + + return SampleType.valueOf(value); + + } + + @Override + public void handle(final AST2BOpContext ctx, final QueryHintScope scope, + final ASTBase op, final SampleType value) { + + switch (scope) { + case Group: + case GroupAndSubGroups: + case Query: + case SubQuery: + if (op instanceof JoinGroupNode) { + _setAnnotation(ctx, scope, op, getName(), value); + } + return; + } + throw new QueryHintException(scope, op, getName(), value); + + } + +} \ No newline at end of file
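The three hints registered in this commit surface in SPARQL through the hint: mechanism. A sketch of what that might look like in a query; the hint: namespace binding and the body of the join group are assumptions, while the hint names and the "Runtime" optimizer value come from the diffs above and from r7691 below:

{{{
public class RTOHintedQuery {

    /** A query that opts a join group into the RTO with explicit tuning. */
    public static final String QUERY =
          "PREFIX hint: <http://www.bigdata.com/queryHints#>\n"
        + "SELECT ?x ?y ?z WHERE {\n"
        + "  hint:Group hint:optimizer \"Runtime\" .\n"     // enable the RTO
        + "  hint:Group hint:RTO-sampleType \"DENSE\" .\n"  // low-IO sampling bias
        + "  hint:Group hint:RTO-limit \"100\" .\n"         // cutoff sample size
        + "  hint:Group hint:RTO-nedges \"2\" .\n"          // initial edges explored
        + "  ?x ?p1 ?y . ?y ?p2 ?z . ?x ?p3 ?z .\n"         // placeholder join group
        + "}";

}
}}}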
From: <tho...@us...> - 2013-12-24 13:23:28
Revision: 7693 http://bigdata.svn.sourceforge.net/bigdata/?rev=7693&view=rev Author: thompsonbry Date: 2013-12-24 13:23:21 +0000 (Tue, 24 Dec 2013) Log Message: ----------- updated javadoc for bigdata free text search (BDS interface). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java 2013-12-24 00:33:28 UTC (rev 7692) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java 2013-12-24 13:23:21 UTC (rev 7693) @@ -28,18 +28,17 @@ package com.bigdata.rdf.store; -import org.openrdf.model.Resource; import org.openrdf.model.URI; -import org.openrdf.model.Value; import org.openrdf.model.impl.URIImpl; -import org.openrdf.model.vocabulary.SESAME; -import com.bigdata.rdf.sparql.ast.cache.DescribeServiceFactory; - /** * A vocabulary for bigdata specific extensions. * + * @see <a + * href="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=FullTextSearch"> + * Free Text Index </a> + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id: BD.java 6786 2012-12-19 18:43:27Z thompsonbry $ */ @@ -86,9 +85,11 @@ final URI SEARCH = new URIImpl(NAMESPACE+"search"); /** - * Magic predicate used to query for free text search metadata. Use - * in conjunction with {@link #SEARCH} as follows: + * Magic predicate used to query for free text search metadata, reporting + * the relevance of the search result to the search query. Use in + * conjunction with {@link #SEARCH} as follows: * <p> + * * <pre> * * select ?s ?relevance @@ -98,13 +99,20 @@ * } * * </pre> + * + * @see #MIN_RELEVANCE + * @see #MAX_RELEVANCE */ final URI RELEVANCE = new URIImpl(NAMESPACE+"relevance"); /** - * Magic predicate used to query for free text search metadata. Use - * in conjunction with {@link #SEARCH} as follows: + * Magic predicate used to query for free text search metadata, reporting + * the rank (origin ONE (1)) of the search result among the search results + * obtained for the search query. The rank is from ONE to N, where N is the + * number of search results from the full text index. Use in conjunction + * with {@link #SEARCH} as follows: * <p> + * * <pre> * * select ?s ?rank @@ -114,13 +122,18 @@ * } * * </pre> + * + * @see #MIN_RANK + * @see #MAX_RANK */ final URI RANK = new URIImpl(NAMESPACE+"rank"); /** - * Magic predicate used to query for free text search metadata. Use + * Magic predicate used to limit the maximum rank of the free text search + * results to the specified value (default {@value #DEFAULT_MAX_RANK}). Use * in conjunction with {@link #SEARCH} as follows: * <p> + * * <pre> * * select ?s @@ -131,7 +144,13 @@ * * </pre> * - * The default is {@value #DEFAULT_MAX_RANK}. + * You can use {@link #MIN_RANK} and {@link #MAX_RANK} together to page + * through the search results. This is often key to achieving low latency + * graph search. By limiting the number of results that are fed into the + * remainder of the SPARQL query, you can ensure that the SPARQL query runs + * quickly. If you do not get enough results from the SPARQL query, you can + * feed the next "page" of free text results by changing the values for the + * {@link #MIN_RANK} and {@link #MAX_RANK} query hints.
*/ final URI MAX_RANK = new URIImpl(NAMESPACE+"maxRank"); @@ -141,9 +160,11 @@ final int DEFAULT_MAX_RANK = Integer.MAX_VALUE; /** - * Magic predicate used to query for free text search metadata. Use + * Magic predicate used to limit the minimum rank of the free text search + * results to the specified value (default {@value #DEFAULT_MIN_RANK}). Use * in conjunction with {@link #SEARCH} as follows: * <p> + * * <pre> * * select ?s
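The paging idiom described in the MAX_RANK javadoc above reduces to sliding a [minRank, maxRank] window across the result ranks. A self-contained sketch; query execution is elided and only the window arithmetic is shown, with the literal forms here being assumptions:

{{{
public class SearchPaging {

    static String pageQuery(final String text, final int minRank, final int maxRank) {
        return "PREFIX bds: <http://www.bigdata.com/rdf/search#>\n"
             + "SELECT ?s WHERE {\n"
             + "  ?s bds:search \"" + text + "\" .\n"
             + "  ?s bds:minRank \"" + minRank + "\" .\n"
             + "  ?s bds:maxRank \"" + maxRank + "\" .\n"
             + "}";
    }

    public static void main(final String[] args) {
        final int pageSize = 50;
        for (int page = 0; page < 3; page++) {
            final int minRank = page * pageSize + 1; // rank origin is ONE (1)
            final int maxRank = minRank + pageSize - 1;
            System.out.println(pageQuery("scale-out RDF triplestore", minRank, maxRank));
        }
    }

}
}}}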
From: <tho...@us...> - 2013-12-24 00:33:36
Revision: 7692 http://bigdata.svn.sourceforge.net/bigdata/?rev=7692&view=rev Author: thompsonbry Date: 2013-12-24 00:33:28 +0000 (Tue, 24 Dec 2013) Log Message: ----------- Linked the parent query and the RTO subquery. Removed the EndOp from the RTO subquery. RTO is running (for simple queries). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java 2013-12-23 22:45:36 UTC (rev 7691) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java 2013-12-24 00:33:28 UTC (rev 7692) @@ -1143,14 +1143,14 @@ * * [This should perhaps be moved into the caller.] */ - lastOp = new EndOp(new BOp[] { lastOp }, NV - .asMap(new NV[] { - new NV(JoinGraph.Annotations.BOP_ID, idFactory.nextId()), // - new NV(JoinGraph.Annotations.EVALUATION_CONTEXT, - BOpEvaluationContext.CONTROLLER)// -// new NV(PipelineOp.Annotations.SHARED_STATE,true),// - }) // - ); +// lastOp = new EndOp(new BOp[] { lastOp }, NV +// .asMap(new NV[] { +// new NV(JoinGraph.Annotations.BOP_ID, idFactory.nextId()), // +// new NV(JoinGraph.Annotations.EVALUATION_CONTEXT, +// BOpEvaluationContext.CONTROLLER)// +//// new NV(PipelineOp.Annotations.SHARED_STATE,true),// +// }) // +// ); return lastOp; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2013-12-23 22:45:36 UTC (rev 7691) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2013-12-24 00:33:28 UTC (rev 7692) @@ -44,6 +44,7 @@ import com.bigdata.bop.ap.SampleIndex; import com.bigdata.bop.ap.SampleIndex.SampleType; import com.bigdata.bop.controller.AbstractSubqueryOp; +import com.bigdata.bop.engine.AbstractRunningQuery; import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.joinGraph.PartitionedJoinGroup; @@ -304,6 +305,7 @@ final BOpIdFactory idFactory = new BOpIdFactory(); // Generate the query from the join path. + // FIXME Update this using StaticAnalysis logic. final PipelineOp queryOp = PartitionedJoinGroup.getQuery(idFactory, false/* distinct */, getSelected(), p.getPredicates(), getConstraints()); @@ -361,12 +363,16 @@ ICloseableIterator<IBindingSet[]> subquerySolutionItr = null; - final IRunningQuery runningQuery = queryEngine.eval(queryOp); + final IRunningQuery runningSubquery = queryEngine.eval(queryOp); try { + // Declare the child query to the parent. + ((AbstractRunningQuery) parentContext.getRunningQuery()) + .addChild(runningSubquery); + // Iterator visiting the subquery solutions. - subquerySolutionItr = runningQuery.iterator(); + subquerySolutionItr = runningSubquery.iterator(); // Copy solutions from the subquery to the query. final long nout = BOpUtility.copy(subquerySolutionItr, @@ -377,7 +383,7 @@ // System.out.println("nout=" + nout); // verify no problems. 
- runningQuery.get(); + runningSubquery.get(); // System.out.println("Future Ok"); @@ -400,7 +406,7 @@ } finally { - runningQuery.cancel(true/* mayInterruptIfRunning */); + runningSubquery.cancel(true/* mayInterruptIfRunning */); if (subquerySolutionItr != null) subquerySolutionItr.close();
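The pattern in this diff - register the child query with the parent, drain the child's iterator, surface errors via the Future, and always cancel in a finally block - generalizes to any subquery run against the QueryEngine. A condensed sketch following the names in the diff; the ICloseableIterator import path and the copy-to-sink step are assumptions:

{{{
import com.bigdata.bop.IBindingSet;
import com.bigdata.bop.PipelineOp;
import com.bigdata.bop.engine.AbstractRunningQuery;
import com.bigdata.bop.engine.IRunningQuery;
import com.bigdata.bop.engine.QueryEngine;
import com.bigdata.striterator.ICloseableIterator;

public class SubqueryRunner {

    static void runAsChild(final QueryEngine queryEngine,
            final IRunningQuery parent, final PipelineOp queryOp)
            throws Exception {

        final IRunningQuery child = queryEngine.eval(queryOp);

        ICloseableIterator<IBindingSet[]> itr = null;

        try {

            // Declare the child to the parent so cancellation propagates.
            ((AbstractRunningQuery) parent).addChild(child);

            // Drain the child's solutions (a real caller copies to a sink).
            itr = child.iterator();
            while (itr.hasNext()) {
                itr.next();
            }

            // Surface any error thrown by the subquery.
            child.get();

        } finally {

            child.cancel(true/* mayInterruptIfRunning */);

            if (itr != null)
                itr.close();

        }

    }

}
}}}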
From: <tho...@us...> - 2013-12-23 22:45:44
Revision: 7691 http://bigdata.svn.sourceforge.net/bigdata/?rev=7691&view=rev Author: thompsonbry Date: 2013-12-23 22:45:36 +0000 (Mon, 23 Dec 2013) Log Message: ----------- Partial integration of the RTO for SPARQL. For the moment, I am only targeting simple join groups with filters that do not require materialization of variable bindings. Once this is working, we can look into how to handle more of SPARQL. The RTO is currently turned on through a query hint. For example: {{{ PREFIX ub: <http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#> SELECT ?x ?y ?z WHERE { hint:Group hint:optimizer "Runtime". ?x a ub:Student . # v0 ?y a ub:Faculty . # v1 ?z a ub:Course . # v2 ?x ub:advisor ?y . # v3 ?y ub:teacherOf ?z . # v4 ?x ub:takesCourse ?z . # v5 } limit 1 }}} Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2013-12-23 22:42:23 UTC (rev 7690) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2013-12-23 22:45:36 UTC (rev 7691) @@ -228,6 +228,7 @@ } + @Override public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { return new FutureTask<Void>(new JoinGraphTask(context)); @@ -278,12 +279,17 @@ } - /** - * {@inheritDoc} - * - * - * TODO where to handle DISTINCT, ORDER BY, GROUP BY for join graph? - */ + /** + * {@inheritDoc} + * + * + * TODO where to handle DISTINCT, ORDER BY, GROUP BY for join graph? + * + * FIXME When run as sub-query, we need to fix point the upstream + * solutions and then flood them into the join graph. Samples of the + * known bound variables can be pulled from those initial solutions. + */ + @Override public Void call() throws Exception { // Create the join graph. Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2013-12-23 22:45:36 UTC (rev 7691) @@ -0,0 +1,259 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details.
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Dec 23, 2013 + */ +package com.bigdata.rdf.sparql.ast.eval; + +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpEvaluationContext; +import com.bigdata.bop.IConstraint; +import com.bigdata.bop.IValueExpression; +import com.bigdata.bop.IVariable; +import com.bigdata.bop.NV; +import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.ap.Predicate; +import com.bigdata.bop.joinGraph.rto.JGraph; +import com.bigdata.bop.joinGraph.rto.JoinGraph; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.internal.constraints.INeedsMaterialization; +import com.bigdata.rdf.sparql.ast.IGroupMemberNode; +import com.bigdata.rdf.sparql.ast.JoinGroupNode; +import com.bigdata.rdf.sparql.ast.StatementPatternNode; +import com.bigdata.rdf.sparql.ast.StaticAnalysis; + +/** + * Integration with the Runtime Optimizer (RTO). + * + * TODO The initial integration aims to run only queries that are simple join + * groups with filters. Once we have this integrated so that it can be enabled + * with a query hint, then we can look into handling subgroups, materialization, + * etc. Even handling filters will be somewhat tricky due to the requirement for + * conditional materialization of variable bindings in advance of certain + * {@link IValueExpression} depending on the {@link INeedsMaterialization} + * interface. Therefore, the place to start is with simple join groups and + * filters whose {@link IValueExpression}s do not require materialization. + * + * TODO We need a way to inspect the RTO behavior. It will get logged, but it + * would be nice to attach it to the query plan. Likewise, it would be nice to + * surface this to the caller so the RTO can be used to guide query construction + * UIs. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/64">Runtime + * Query Optimization</a> + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/258">Integrate + * RTO into SAIL</a> + * @see <a + * href="http://www-db.informatik.uni-tuebingen.de/files/research/pathfinder/publications/rox-demo.pdf"> + * ROX </a> + * @see JoinGraph + * @see JGraph + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class AST2BOpRTO extends AST2BOpJoins { + + /** + * Inspect the remainder of the join group. If we can isolate a join graph + * and filters, then we will push them down into an RTO JoinGroup. Since the + * joins have already been ordered by the static optimizer, we can accept + * them in sequence along with any attachable filters and (if we get at + * least 3 predicates) push them down into an RTO join group. + * <p> + * Note: Two predicates in a join group is not enough for the RTO to provide + * a different join ordering. Both the static optimizer and the RTO will + * always choose the AP with the smaller cardinality to run first. If there + * are only 2 predicates, then the other predicate will run second. You need + * at least three predicates before the RTO could provide a different + * answer. 
+ */ + static protected PipelineOp convertRTOJoinGraph(PipelineOp left, + final JoinGroupNode joinGroup, final Set<IVariable<?>> doneSet, + final AST2BOpContext ctx, final AtomicInteger start) { + + final int arity = joinGroup.arity(); + + // The predicates for the RTO join group. + final Set<StatementPatternNode> sps = new LinkedHashSet<StatementPatternNode>(); + @SuppressWarnings("rawtypes") + final Set<Predicate> preds = new LinkedHashSet<Predicate>(); + final List<IConstraint> constraints = new LinkedList<IConstraint>(); + + // Examine the remaining joins, stopping at the first non-SP. + for (int i = start.get(); i < arity; i++) { + + final IGroupMemberNode child = (IGroupMemberNode) joinGroup + .get(i); + + if (child instanceof StatementPatternNode) { + // SP + final StatementPatternNode sp = (StatementPatternNode) child; + final boolean optional = sp.isOptional(); + if(optional) { + // TODO Handle optional SPs in joinGraph. + break; + } + + final List<IConstraint> attachedConstraints = getJoinConstraints(sp); + + @SuppressWarnings("rawtypes") + final Map<IConstraint, Set<IVariable<IV>>> needsMaterialization = + new LinkedHashMap<IConstraint, Set<IVariable<IV>>>(); + + getJoinConstraints(attachedConstraints, needsMaterialization); + + if (!needsMaterialization.isEmpty()) { + /* + * At least one variable requires (or might require) + * materialization. This is not currently handled by + * the RTO so we break out of the loop. + * + * TODO Handle materialization patterns within the RTO. + */ + break; + } + +// // Add constraints to the join for that predicate. +// anns.add(new NV(JoinAnnotations.CONSTRAINTS, getJoinConstraints( +// constraints, needsMaterialization))); + +// /* +// * Pull off annotations before we clear them from the predicate. +// */ +// final Scope scope = (Scope) pred.getProperty(Annotations.SCOPE); +// +// // true iff this is a quads access path. +// final boolean quads = pred.getProperty(Annotations.QUADS, +// Annotations.DEFAULT_QUADS); +// +// // pull of the Sesame dataset before we strip the annotations. +// final DatasetNode dataset = (DatasetNode) pred +// .getProperty(Annotations.DATASET); + + // Something the RTO can handle. + sps.add(sp); + /* + * TODO Assign predId? + * + * FIXME Handle Triples vs Quads, Default vs Named Graph, and + * DataSet. This probably means pushing more logic down into + * the RTO from AST2BOpJoins. + */ + final Predicate<?> pred = AST2BOpUtility.toPredicate(sp, ctx); +// final int joinId = ctx.nextId(); +// +// // annotations for this join. +// final List<NV> anns = new LinkedList<NV>(); +// +// anns.add(new NV(BOp.Annotations.BOP_ID, joinId)); + preds.add(pred); + if (attachedConstraints != null) { + // RTO will figure out where to attach these constraints. + constraints.addAll(attachedConstraints); + } + + } else { + // Non-SP. + break; + } + + } + + if (sps.size() < 3) { + + /* + * There are not enough joins for the RTO. + * + * TODO For incremental query construction UIs, it would be useful + * to run just the RTO and to run it with even a single join. This + * will give us sample values as well as estimated cardinalities. If + * the UI has triple patterns that do not join (yet), then those + * should be grouped. + */ + return left; + + } + + /* + * Figure out which variables are projected out of the RTO. + * + * TODO This should only include things that are not reused later in the + * query.
+ */ + final Set<IVariable<?>> selectVars = new LinkedHashSet<IVariable<?>>(); + { + + for (StatementPatternNode sp : sps) { + + // Note: recursive only matters for complex nodes, not SPs. + ctx.sa.getDefinitelyProducedBindings(sp, selectVars, true/* recursive */); + + } + + } + + /* + * FIXME When running the RTO as anything other than the top-level join + * group in the query plan and for the *FIRST* joins in the query plan, + * we need to flow in any solutions that are already in the pipeline + * (unless we are going to run the RTO "bottom up") and build a hash + * index. When the hash index is ready, we can execute the join group. + */ + left = new JoinGraph(leftOrEmpty(left),// + new NV(BOp.Annotations.BOP_ID, ctx.nextId()),// + new NV(BOp.Annotations.EVALUATION_CONTEXT, + BOpEvaluationContext.CONTROLLER),// + new NV(BOp.Annotations.CONTROLLER, true),// TODO DROP the "CONTROLLER" annotation. The concept is not required. + // new NV(PipelineOp.Annotations.MAX_PARALLEL, 1),// + // new NV(PipelineOp.Annotations.LAST_PASS, true),// required + new NV(JoinGraph.Annotations.SELECTED, selectVars + .toArray(new IVariable[selectVars.size()])),// + new NV(JoinGraph.Annotations.VERTICES, + preds.toArray(new Predicate[preds.size()])),// + new NV(JoinGraph.Annotations.CONSTRAINTS, constraints + .toArray(new IConstraint[constraints.size()])),// + new NV(JoinGraph.Annotations.LIMIT, + JoinGraph.Annotations.DEFAULT_LIMIT),// + new NV(JoinGraph.Annotations.NEDGES, + JoinGraph.Annotations.DEFAULT_NEDGES),// + new NV(JoinGraph.Annotations.SAMPLE_TYPE, + JoinGraph.Annotations.DEFAULT_SAMPLE_TYPE)// + ); + + // These joins were consumed. + start.addAndGet(sps.size()); + + return left; + + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 2013-12-23 22:42:23 UTC (rev 7690) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 2013-12-23 22:45:36 UTC (rev 7691) @@ -123,6 +123,7 @@ import com.bigdata.rdf.sparql.ast.ProjectionNode; import com.bigdata.rdf.sparql.ast.QueryBase; import com.bigdata.rdf.sparql.ast.QueryHints; +import com.bigdata.rdf.sparql.ast.QueryOptimizerEnum; import com.bigdata.rdf.sparql.ast.QueryRoot; import com.bigdata.rdf.sparql.ast.RangeNode; import com.bigdata.rdf.sparql.ast.SliceNode; @@ -165,7 +166,7 @@ * >Query Evaluation</a>. * */ -public class AST2BOpUtility extends AST2BOpJoins { +public class AST2BOpUtility extends AST2BOpRTO { private static final transient Logger log = Logger .getLogger(AST2BOpUtility.class); @@ -176,9 +177,8 @@ * <p> * <strong>NOTE:</strong> This is the entry for {@link ASTEvalHelper}. Do * NOT use this entry point directly. It will evolve when we integrate the - * RTO and/or the BindingsClause of the SPARQL 1.1 Federation extension. - * Applications should use the public entry points on {@link ASTEvalHelper} - * rather that this entry point. + * RTO. Applications should use public entry points on {@link ASTEvalHelper} + * instead. * * @param ctx * The evaluation context. @@ -191,15 +191,15 @@ * TODO We could handle the IBindingSet[] by stuffing the data into * a named solution set during the query rewrite and attaching that * named solution set to the AST. This could allow for very large - * solution sets to be passed into a query. 
Any such change would + * have to be deeply integrated with the SPARQL parser in order to * provide any benefit for the Java heap. - * - * TODO This logic is currently single-threaded. If we allow internal - * concurrency or when we integrate the RTO, we will need to ensure that - * the logic remains safely cancelable by an interrupt of the thread in - * which the query was submitted. See <a - * href="https://sourceforge.net/apps/trac/bigdata/ticket/715" > + * + * TODO This logic is currently single-threaded. If we allow + * internal concurrency or when we integrate the RTO, we will need + * to ensure that the logic remains safely cancelable by an + * interrupt of the thread in which the query was submitted. See <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/715" > * Interrupt of thread submitting a query for evaluation does not * always terminate the AbstractRunningQuery </a>. */ @@ -2506,6 +2506,26 @@ left = doMergeJoin(left, joinGroup, doneSet, start, ctx); } + + if (joinGroup.getProperty(QueryHints.OPTIMIZER, + QueryOptimizerEnum.Static).equals(QueryOptimizerEnum.Runtime)) { + + /* + * Inspect the remainder of the join group. If we can isolate a join + * graph and filters, then we will push them down into an RTO + * JoinGroup. Since the joins have already been ordered by the + * static optimizer, we can accept them in sequence along with any + * attachable filters. + */ + + left = convertRTOJoinGraph(left, joinGroup, doneSet, ctx, start); + + /* + * Fall through. Anything not handled in this section will be + * handled as part of normal join group processing below. + */ + + } /* * Translate the remainder of the group. @@ -2539,12 +2559,12 @@ sp.getQueryHints()); continue; } else if (child instanceof ArbitraryLengthPathNode) { - @SuppressWarnings("unchecked") +// @SuppressWarnings("unchecked") final ArbitraryLengthPathNode alpNode = (ArbitraryLengthPathNode) child; left = convertArbitraryLengthPath(left, alpNode, doneSet, ctx); continue; } else if (child instanceof ZeroLengthPathNode) { - @SuppressWarnings("unchecked") +// @SuppressWarnings("unchecked") final ZeroLengthPathNode zlpNode = (ZeroLengthPathNode) child; left = convertZeroLengthPath(left, zlpNode, doneSet, ctx); continue; @@ -2588,7 +2608,7 @@ } continue; } else if (child instanceof UnionNode) { - @SuppressWarnings("unchecked") +// @SuppressWarnings("unchecked") final UnionNode unionNode = (UnionNode) child; left = convertUnion(left, unionNode, doneSet, ctx); continue; @@ -3955,7 +3975,7 @@ * DataSetJoin with an "inline" access path.) */ @SuppressWarnings("rawtypes") - private static final Predicate toPredicate(final StatementPatternNode sp, + protected static final Predicate toPredicate(final StatementPatternNode sp, final AST2BOpContext ctx) { final QueryRoot query = ctx.astContainer.getOptimizedAST();
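One detail worth calling out from convertRTOJoinGraph above: the hand-off to the RTO is gated on the shape of the join group. A toy restatement of that gate follows; the names here are illustrative, not the committed API:

{{{
public class RTOGate {

    /**
     * True iff the RTO can improve on the static join ordering: no
     * optionals, no (possible) materialization requirements, and at
     * least three statement patterns (with only one or two, every
     * optimizer picks the same order).
     */
    static boolean worthRunningRTO(final int numStatementPatterns,
            final boolean needsMaterialization, final boolean hasOptional) {

        if (needsMaterialization || hasOptional) {
            // Not yet handled by the RTO integration (see TODOs above).
            return false;
        }

        return numStatementPatterns >= 3;

    }

}
}}}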
From: <tho...@us...> - 2013-12-23 22:42:33
Revision: 7690 http://bigdata.svn.sourceforge.net/bigdata/?rev=7690&view=rev Author: thompsonbry Date: 2013-12-23 22:42:23 +0000 (Mon, 23 Dec 2013) Log Message: ----------- javadoc and @Override Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IPassesMaterialization.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java 2013-12-22 20:17:22 UTC (rev 7689) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java 2013-12-23 22:42:23 UTC (rev 7690) @@ -75,6 +75,7 @@ * <p> * {@inheritDoc} */ + @Override public CoreBaseBOp clone() { final Class<? extends CoreBaseBOp> cls = getClass(); final Constructor<? extends CoreBaseBOp> ctor; @@ -98,6 +99,7 @@ * General contract is a short (non-recursive) representation of the * {@link BOp}. */ + @Override public String toShortString() { final BOp t = this; if (t instanceof IValueExpression<?> @@ -125,6 +127,7 @@ * Return a non-recursive representation of the arguments and annotations * for this {@link BOp}. */ + @Override public String toString() { final StringBuilder sb = new StringBuilder(); @@ -181,6 +184,7 @@ } } + @Override final public Object getRequiredProperty(final String name) { final Object tmp = getProperty(name); @@ -193,6 +197,7 @@ } + @Override @SuppressWarnings("unchecked") final public <T> T getProperty(final String name, final T defaultValue) { @@ -229,18 +234,22 @@ } + @Override final public int getId() { return (Integer) getRequiredProperty(Annotations.BOP_ID); } - + + @Override final public boolean isController() { - - return getProperty(Annotations.CONTROLLER, false); - + + return getProperty(Annotations.CONTROLLER, + Annotations.DEFAULT_CONTROLLER); + } + @Override final public BOpEvaluationContext getEvaluationContext() { return getProperty(Annotations.EVALUATION_CONTEXT, @@ -251,6 +260,7 @@ /** * <code>true</code> if all arguments and annotations are the same. */ + @Override public boolean equals(final Object other) { if (this == other) @@ -378,6 +388,7 @@ /** * The hash code is based on the hash of the operands (cached). */ + @Override public int hashCode() { int h = hash; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IPassesMaterialization.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IPassesMaterialization.java 2013-12-22 20:17:22 UTC (rev 7689) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IPassesMaterialization.java 2013-12-23 22:42:23 UTC (rev 7690) @@ -24,11 +24,9 @@ */ package com.bigdata.rdf.internal.constraints; - - /** * Some {@link IVValueExpression} evaluate to one of their - * arguments, and if a INeedsEvaluation is in the expressions + * arguments, and if a {@link INeedsMaterialization} is in the expressions * parent path, the parent needs to materialize the expression's arguments. */ public interface IPassesMaterialization { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
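[Editor's note] The isController() change in r7690 is small but illustrative: the fallback value moves from a hard-coded literal to the Annotations.DEFAULT_CONTROLLER constant, so the default lives beside the annotation name. A minimal, self-contained sketch of that pattern (class and method names below are simplified stand-ins, not the real CoreBaseBOp):

    import java.util.HashMap;
    import java.util.Map;

    public class AnnotationDefaultDemo {

        interface Annotations {
            String CONTROLLER = "controller";
            boolean DEFAULT_CONTROLLER = false; // single source of truth
        }

        private final Map<String, Object> props = new HashMap<String, Object>();

        @SuppressWarnings("unchecked")
        private <T> T getProperty(final String name, final T defaultValue) {
            final Object v = props.get(name);
            return v == null ? defaultValue : (T) v;
        }

        public boolean isController() {
            // Before r7690 the fallback was the literal [false]; reading
            // the shared constant keeps every call site in agreement.
            return getProperty(Annotations.CONTROLLER,
                    Annotations.DEFAULT_CONTROLLER);
        }

        public static void main(final String[] args) {
            System.out.println(new AnnotationDefaultDemo().isController()); // false
        }
    }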
From: <tho...@us...> - 2013-12-22 20:17:32
Revision: 7689 http://bigdata.svn.sourceforge.net/bigdata/?rev=7689&view=rev Author: thompsonbry Date: 2013-12-22 20:17:22 +0000 (Sun, 22 Dec 2013) Log Message: ----------- Reduced the "explain" statistics to a simple table and added an "explain=details" option to provide all of the available detail. The cluster oriented statistics are no longer displayed unless you are running a scale-out cluster. The mutation statistics are no longer displayed for queries (they really only apply to inference rules). The simple view has the following columns: queryId deadline elapsed cause evalOrder bopSummary predSummary nvars fastRangeCount sumMillis unitsIn unitsOut typeErrors joinRatio In fact, this really should be replaced by parameterized styling of the XHTML result page to render the appropriate level of detail for the user rather than doing this in the server side of the code. When we do this, we can just specify detailedStats=clusterStats=mutationStats=true to get all the data into the page and then style the page to render only the bits that are relevant to the user. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2013-12-20 13:17:53 UTC (rev 7688) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2013-12-22 20:17:22 UTC (rev 7689) @@ -1,1405 +1,1531 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ -/* - * Created on Jun 22, 2009 - */ - -package com.bigdata.bop.engine; - -import java.io.IOException; -import java.io.Writer; -import java.text.DateFormat; -import java.util.Arrays; -import java.util.Date; -import java.util.Iterator; -import java.util.Map; -import java.util.UUID; - -import org.apache.log4j.Logger; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.BOpUtility; -import com.bigdata.bop.IPredicate; -import com.bigdata.bop.IQueryAttributes; -import com.bigdata.bop.IVariable; -import com.bigdata.bop.IVariableOrConstant; -import com.bigdata.bop.NamedSolutionSetRef; -import com.bigdata.bop.controller.INamedSolutionSetRef; -import com.bigdata.bop.controller.NamedSetAnnotations; -import com.bigdata.bop.engine.RunState.RunStateEnum; -import com.bigdata.bop.join.IHashJoinUtility; -import com.bigdata.bop.join.PipelineJoin; -import com.bigdata.bop.join.PipelineJoinStats; -import com.bigdata.bop.rdf.join.ChunkedMaterializationOp; -import com.bigdata.counters.render.XHTMLRenderer; -import com.bigdata.rawstore.Bytes; -import com.bigdata.rdf.sparql.ast.eval.AST2BOpJoins; -import com.bigdata.striterator.IKeyOrder; - -/** - * Class defines the log on which summary operator execution statistics are - * written. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id: RuleLog.java 3448 2010-08-18 20:55:58Z thompsonbry $ - */ -public class QueryLog { - - private static final String NA = "N/A"; - private static final String TD = "<td>"; - private static final String TDx = "</td\n>"; -// // the symbol used when a count is zero. -// private static final String ZE = "0"; -// -//// // the symbol used when a count was zero, so count/sec is also zero. -//// final String NA = "0"; -// -// // the symbol used when the elapsed time was zero, so count/sec is divide by zero. -// private static final String DZ = "0"; - - protected static final transient Logger log = Logger - .getLogger(QueryLog.class); - - static { - logTableHeader(); - } - - static public void logTableHeader() { - if(log.isInfoEnabled()) - log.info(QueryLog.getTableHeader()); - } - - /** - * A single buffer is reused to keep down the heap churn. - */ - final private static StringBuilder sb = new StringBuilder( - Bytes.kilobyte32 * 4); - - /** - * Log rule execution statistics. - * - * @param q - * The running query. - */ - static public void log(final IRunningQuery q) { - - if (log.isInfoEnabled()) { - - try { - - final IRunningQuery[] children = (q instanceof AbstractRunningQuery) ? ((AbstractRunningQuery) q) - .getChildren() : null; - - /* - * Note: We could use a striped lock here over a small pool of - * StringBuilder's to decrease contention for the single buffer - * while still avoiding heap churn for buffer allocation. Do - * this if the monitor for this StringBuilder shows up as a hot - * spot when query logging is enabled. - */ - synchronized (sb) { - - // clear the buffer. 
- sb.setLength(0); - - { - final Map<Integer/* bopId */, QueueStats> queueStats = ((ChunkedRunningQuery) q) - .getQueueStats(); - - logSummaryRow(q, queueStats, sb); - - logDetailRows(q, queueStats, sb); - } - - if (children != null) { - - for (int i = 0; i < children.length; i++) { - - final IRunningQuery c = children[i]; - - final Map<Integer/* bopId */, QueueStats> queueStats = ((ChunkedRunningQuery) c) - .getQueueStats(); - - logSummaryRow(c, queueStats, sb); - - logDetailRows(c, queueStats, sb); - - } - - } - - log.info(sb); - - } - - } catch (RuntimeException t) { - - log.error(t,t); - - } - - } - - } - -// /** -// * Log the query. -// * -// * @param q -// * The query. -// * @param sb -// * Where to write the log message. -// */ -// static public void log(final boolean includeTableHeader, -// final IRunningQuery q, final StringBuilder sb) { -// -// if(includeTableHeader) { -// -// sb.append(getTableHeader()); -// -// } -// -// logDetailRows(q, sb); -// -// logSummaryRow(q, sb); -// -// } - - /** - * Log a detail row for each operator in the query. - */ - static private void logDetailRows(final IRunningQuery q, - final Map<Integer/* bopId */, QueueStats> queueStats, - final StringBuilder sb) { - - final Integer[] order = BOpUtility.getEvaluationOrder(q.getQuery()); - - int orderIndex = 0; - - for (Integer bopId : order) { - - sb.append(getTableRow(q, orderIndex, bopId, false/* summary */, - queueStats)); - -// sb.append('\n'); - - orderIndex++; - - } - - } - - /** - * Log a summary row for the query. - */ - static private void logSummaryRow(final IRunningQuery q, - final Map<Integer/* bopId */, QueueStats> queueStats, - final StringBuilder sb) { - - sb.append(getTableRow(q, -1/* orderIndex */, q.getQuery().getId(), - true/* summary */, queueStats)); - -// sb.append('\n'); - - } - - static private String getTableHeader() { - - final StringBuilder sb = new StringBuilder(); - - /* - * Common columns for the overall query and for each pipeline operator. - */ - sb.append("queryId"); -// sb.append("\ttag"); - sb.append("\tbeginTime"); - sb.append("\tdoneTime"); - sb.append("\tdeadline"); - sb.append("\telapsed"); - sb.append("\tserviceId"); - sb.append("\tcause"); - sb.append("\tbop"); - /* - * Columns for each pipeline operator. - */ - sb.append("\tevalOrder"); // [0..n-1] - sb.append("\tevalContext"); - sb.append("\tcontroller"); - sb.append("\tbopId"); - sb.append("\tpredId"); - sb.append("\tbopSummary"); // short form of the bop. - sb.append("\tpredSummary"); // short form of the pred. - // metadata considered by the static optimizer. - sb.append("\tstaticBestKeyOrder"); // original key order assigned by static optimizer. - sb.append("\toverrideKeyOrder"); // key order iff explicitly overridden. - sb.append("\tnvars"); // #of variables in the predicate for a join. - sb.append("\tfastRangeCount"); // fast range count used by the static optimizer. - // dynamics (aggregated for totals as well). - sb.append("\trunState"); // true iff the operator will not be evaluated again. - sb.append("\tsumMillis"); // cumulative milliseconds for eval of this operator. - sb.append("\topCount"); // cumulative #of invocations of tasks for this operator. - sb.append("\tnumRunning");// #of concurrent invocations of the operator (current value) - sb.append("\tfanOut"); // #of shards/nodes on which the operator has started. - sb.append("\tqueueShards"); // #of shards with work queued for this operator. - sb.append("\tqueueChunks"); // #of chunks queued for this operator. 
- sb.append("\tqueueSolutions"); // #of solutions queued for this operator. - sb.append("\tchunksIn"); - sb.append("\tunitsIn"); - sb.append("\tunitsInPerChunk"); // average #of solutions in per chunk. - sb.append("\tchunksOut"); - sb.append("\tunitsOut"); - sb.append("\tunitsOutPerChunk"); // average #of solutions out per chunk. - sb.append("\tmutationCount"); - sb.append("\ttypeErrors"); - sb.append("\tjoinRatio"); // expansion rate multipler in the solution count. - sb.append("\taccessPathDups"); - sb.append("\taccessPathCount"); - sb.append("\taccessPathRangeCount"); - sb.append("\taccessPathChunksIn"); - sb.append("\taccessPathUnitsIn"); - // dynamics based on elapsed wall clock time. - sb.append("\tsolutions/ms"); - sb.append("\tmutations/ms"); - // - // cost model(s) - // - sb.append('\n'); - - return sb.toString(); - - } - - /** - * Return a tabular representation of the query {@link RunState}. - * - * @param q - * The {@link IRunningQuery}. - * @param evalOrder - * The evaluation order for the operator. - * @param bopId - * The identifier for the operator. - * @param summary - * <code>true</code> iff the summary for the query should be - * written. - * - * @return The row of the table. - */ - static private String getTableRow(final IRunningQuery q, - final int evalOrder, final Integer bopId, final boolean summary, - final Map<Integer/*bopId*/,QueueStats> queueStats) { - - final StringBuilder sb = new StringBuilder(); - - final DateFormat dateFormat = DateFormat.getDateTimeInstance( - DateFormat.FULL, DateFormat.FULL); - - // The elapsed time for the query (wall time in milliseconds). - final long elapsed = q.getElapsed(); - - // The serviceId on which the query is running : null unless scale-out. - final UUID serviceId = q.getQueryEngine().getServiceUUID(); - - // The thrown cause : null unless the query was terminated abnormally. - final Throwable cause = q.getCause(); - - sb.append(q.getQueryId()); - sb.append('\t'); -// sb.append(q.getQuery().getProperty(QueryHints.TAG, -// QueryHints.DEFAULT_TAG)); -// sb.append('\t'); - sb.append(dateFormat.format(new Date(q.getStartTime()))); - sb.append('\t'); - sb.append(dateFormat.format(new Date(q.getDoneTime()))); - sb.append('\t'); - if(q.getDeadline()!=Long.MAX_VALUE) - sb.append(dateFormat.format(new Date(q.getDeadline()))); - sb.append('\t'); - sb.append(elapsed); - sb.append('\t'); - sb.append(serviceId == null ? NA : serviceId.toString()); - sb.append('\t'); - if (cause != null) - sb.append(cause.getLocalizedMessage()); - - final Map<Integer, BOp> bopIndex = q.getBOpIndex(); - final Map<Integer, BOpStats> statsMap = q.getStats(); - final BOp bop = bopIndex.get(bopId); - - // the operator. - sb.append('\t'); - if (summary) { - /* - * The entire query (recursively). New lines are translated out to - * keep this from breaking the table format. - */ - sb.append(BOpUtility.toString(q.getQuery()).replace('\n', ' ')); - sb.append('\t'); // evalOrder - sb.append("total"); - sb.append('\t'); // evaluation context - sb.append('\t'); // controller annotation. - sb.append('\t'); // bopId - sb.append("total"); - } else { - // Otherwise show just this bop. - sb.append(bopIndex.get(bopId).toString()); - sb.append('\t'); - sb.append(evalOrder); // eval order for this bop. 
- sb.append('\t'); - sb.append(bop.getEvaluationContext()); - sb.append('\t'); - sb.append(bop.getProperty(BOp.Annotations.CONTROLLER, - BOp.Annotations.DEFAULT_CONTROLLER)); - sb.append('\t'); - sb.append(Integer.toString(bopId)); - } - - sb.append('\t'); - @SuppressWarnings("rawtypes") - final IPredicate pred = (IPredicate<?>) bop - .getProperty(PipelineJoin.Annotations.PREDICATE); - final Integer predId = pred == null ? null : (Integer) pred - .getProperty(BOp.Annotations.BOP_ID); - if (predId != null) { - sb.append(predId); - } else { - if (pred != null) { - // Expected but missing. - sb.append(NA); - } - } - sb.append('\t'); - // bopSummary - if (summary) { - sb.append("total"); - } else { - sb.append(bop.getClass().getSimpleName()); - sb.append("[" + bopId + "]"); - } - sb.append('\t'); - if (pred != null) { - sb.append(pred.getClass().getSimpleName()); - sb.append("[" + predId + "]("); - final Iterator<BOp> itr = pred.argIterator(); - boolean first = true; - while (itr.hasNext()) { - if (first) { - first = false; - } else - sb.append(", "); - final IVariableOrConstant<?> x = (IVariableOrConstant<?>) itr - .next(); - if (x.isVar()) { - sb.append("?"); - sb.append(x.getName()); - } else { - sb.append(x.get()); - //sb.append(((IV)x.get()).getValue()); - } - } - sb.append(")"); - } - if (bop.getProperty(NamedSetAnnotations.NAMED_SET_REF) != null) { - /* - * Named Solution Set(s) summary. - */ - final Object namedSetRef = bop - .getProperty(NamedSetAnnotations.NAMED_SET_REF); - if (namedSetRef instanceof INamedSolutionSetRef) { - final INamedSolutionSetRef ref = (INamedSolutionSetRef) namedSetRef; - final IRunningQuery t = getRunningQuery(q, ref.getQueryId()); - if (t != null) { - final IQueryAttributes attrs = t == null ? null : t - .getAttributes(); - final IHashJoinUtility state = (IHashJoinUtility) (attrs == null ? null - : attrs.get(ref)); - if (state != null) { - // Prefer the IHashUtilityState - sb.append(state.toString()); - } else { - // Otherwise the NamedSolutionSetRef - sb.append(ref.toString()); - } - // sb.append(", joinvars=" + Arrays.toString(ref.joinVars)); - } - } else { - final INamedSolutionSetRef[] refs = (INamedSolutionSetRef[]) namedSetRef; - for (int i = 0; i < refs.length; i++) { - final INamedSolutionSetRef ref = refs[i]; - if (i > 0) - sb.append(","); - final IRunningQuery t = getRunningQuery(q, ref.getQueryId()); - if (t != null) { - final IQueryAttributes attrs = t == null ? null : t - .getAttributes(); - final IHashJoinUtility state = (IHashJoinUtility) (attrs == null ? null - : attrs.get(ref)); - if (state != null) { - // Prefer the IHashUtilityState - sb.append(state.toString()); - sb.append(cdata(",namedSet=")); - sb.append(cdata(ref.getLocalName())); - } else { - // Otherwise the NamedSolutionSetRef - sb.append(ref.toString()); - } - } - // sb.append(", joinvars=" + - // Arrays.toString(refs[0].joinVars)); - } - } - } - if (bop instanceof ChunkedMaterializationOp) { - final IVariable<?>[] vars = (IVariable<?>[]) bop - .getProperty(ChunkedMaterializationOp.Annotations.VARS); - sb.append(Arrays.toString(vars)); - } - - /* - * Static optimizer metadata. - * - * FIXME Should report [nvars] be the expected asBound #of variables - * given the assigned evaluation order and the expectation of propagated - * bindings (optionals may leave some unbound). - */ - { - - if (pred != null) { - - // Static optimizer key order (if run). 
- final IKeyOrder<?> keyOrder = (IKeyOrder<?>) pred - .getProperty(AST2BOpJoins.Annotations.ORIGINAL_INDEX); - - // Explicit override of the key order (if given). - final Object overrideKeyOrder = pred - .getProperty(IPredicate.Annotations.KEY_ORDER); - - final Long rangeCount = (Long) pred - .getProperty(AST2BOpJoins.Annotations.ESTIMATED_CARDINALITY); - - sb.append('\t'); // keyorder - if (keyOrder != null) - sb.append(keyOrder); - - sb.append('\t'); // keyorder override. - if (overrideKeyOrder != null) - sb.append(overrideKeyOrder.toString()); - - sb.append('\t'); // nvars - if (keyOrder != null) - sb.append(pred.getVariableCount(keyOrder)); - - sb.append('\t'); // rangeCount - if (rangeCount!= null) - sb.append(rangeCount); - - } else { - sb.append('\t'); // keyorder (static optimizer) - sb.append('\t'); // keyorder (override) - sb.append('\t'); // nvars - sb.append('\t'); // rangeCount - } - } - - /* - * Dynamics. - */ - - final int fanOut = ((AbstractRunningQuery) q).getStartedOnCount(bopId); - - final long numRunning = ((AbstractRunningQuery) q) - .getRunningCount(bopId); - - final PipelineJoinStats stats = new PipelineJoinStats(); - if(summary) { - // Aggregate the statistics for all pipeline operators. - for (BOpStats t : statsMap.values()) { - stats.add(t); - } - } else { - // Just this operator. - final BOpStats tmp = statsMap.get(bopId); - if (tmp != null) - stats.add(tmp); - } - final long unitsIn = stats.unitsIn.get(); - final long unitsOut = stats.unitsOut.get(); - - sb.append('\t'); - if (bop != null) { -// if (stats.opCount.get() == 0) -// sb.append("NotStarted"); -// else - // Note: This requires a lock! -// final RunStateEnum runState = ((AbstractRunningQuery) q) -// .getRunState(bopId); - // Note: Barges in if possible, but does not wait for a lock. - final RunStateEnum runState = ((AbstractRunningQuery) q) - .tryGetRunState(bopId); - sb.append(runState == null ? NA : runState.name()); - } else { - sb.append(NA); - } - - sb.append('\t'); - sb.append(stats.elapsed.get()); - sb.append('\t'); - sb.append(stats.opCount.get()); - sb.append('\t'); - sb.append(Long.toString(numRunning)); - sb.append('\t'); - sb.append(Integer.toString(fanOut)); - { - final QueueStats tmp = queueStats == null ? null : queueStats - .get(bopId); - if (tmp != null) { - sb.append('\t'); - sb.append(tmp.shardSet.size()); // aka #of work queues. - sb.append('\t'); - sb.append(tmp.chunkCount); - sb.append('\t'); - sb.append(tmp.solutionCount); - } else { - sb.append('\t'); - sb.append('\t'); - sb.append('\t'); - } - } - sb.append('\t'); - sb.append(stats.chunksIn.get()); - sb.append('\t'); - sb.append(stats.unitsIn.get()); - sb.append('\t'); - sb.append(Double.toString(avg(stats.unitsIn.get(), stats.chunksIn.get()))); - sb.append('\t'); - sb.append(stats.chunksOut.get()); - sb.append('\t'); - sb.append(stats.unitsOut.get()); - sb.append('\t'); - sb.append(Double.toString(avg(stats.unitsOut.get(), stats.chunksOut.get()))); - sb.append('\t'); - sb.append(stats.mutationCount.get()); - sb.append('\t'); - sb.append(stats.typeErrors.get()); - sb.append('\t'); - sb.append(unitsIn == 0 ? NA : unitsOut / (double) unitsIn); - sb.append('\t'); - sb.append(stats.accessPathDups.get()); - sb.append('\t'); - sb.append(stats.accessPathCount.get()); - sb.append('\t'); - sb.append(stats.accessPathRangeCount.get()); - sb.append('\t'); - sb.append(stats.accessPathChunksIn.get()); - sb.append('\t'); - sb.append(stats.accessPathUnitsIn.get()); - - /* - * Use the total elapsed time for the query (wall time). 
- */ - // solutions/ms - sb.append('\t'); - sb.append(elapsed == 0 ? 0 : stats.unitsOut.get() / elapsed); - // mutations/ms - sb.append('\t'); - sb.append(elapsed == 0 ? 0 : stats.mutationCount.get() / elapsed); - - sb.append('\n'); - - return sb.toString(); - - } - - /** - * Format the data as an (X)HTML table. The table will include a header - * which declares the columns, a detail row for each operator (optional), - * and a summary row for the query as a whole. - * - * @param queryStr - * The original text of the query (e.g., a SPARQL query) - * (optional). - * @param q - * The {@link IRunningQuery}. - * @param children - * The child query(s) -or- <code>null</code> if they are not to - * be displayed. - * @param w - * Where to write the table. - * @param summaryOnly - * When <code>true</code> only the summary row will be written. - * @param maxBopLength - * The maximum length to display from {@link BOp#toString()} and - * ZERO (0) to display everything. Data longer than this value - * will be accessible from a flyover, but not directly visible in - * the page. - * @throws IOException - */ - public static void getTableXHTML(// - final String queryStr,// - final IRunningQuery q,// - final IRunningQuery[] children,// - final Writer w, final boolean summaryOnly, - final int maxBopLength) - throws IOException { - - // the table start tag. - w.write("<table border=\"1\" summary=\"" + attrib("Query Statistics") - + "\"\n>"); - - getTableHeaderXHTML(w); - - // Main query. - { - - final Map<Integer/* bopId */, QueueStats> queueStats = ((ChunkedRunningQuery) q) - .getQueueStats(); - - // Summary first. - getSummaryRowXHTML(queryStr, q, w, queueStats, maxBopLength); - - if (!summaryOnly) { - - // Then the detail rows. - getTableRowsXHTML(queryStr, q, w, queueStats, maxBopLength); - - } - - } - - if (!summaryOnly) { - - // Then the children too. - - if (children != null) { - - for (int i = 0; i < children.length; i++) { - - final IRunningQuery c = children[i]; - - // Repeat the header so we can recognize what follows as a - // child query. - getTableHeaderXHTML(w); - - { - // Work queue summary for the child query. - final Map<Integer/* bopId */, QueueStats> queueStats = ((ChunkedRunningQuery) c) - .getQueueStats(); - - // Summary first. - getSummaryRowXHTML(null/* queryStr */, c, w, - queueStats, maxBopLength); - - // Then the detail rows. - getTableRowsXHTML(null/* queryStr */, c, w, queueStats, - maxBopLength); - - } - - } - - } - - } - - w.write("</table\n>"); - - } - - public static void getTableHeaderXHTML(final Writer w) - throws IOException { - - // header row. - w.write("<tr\n>"); - /* - * Common columns for the overall query and for each pipeline operator. - */ - w.write("<th>queryId</th>"); -// w.write("<th>tag</th>"); - w.write("<th>beginTime</th>"); - w.write("<th>doneTime</th>"); - w.write("<th>deadline</th>"); - w.write("<th>elapsed</th>"); - w.write("<th>serviceId</th>"); - w.write("<th>cause</th>"); -// w.write("<th>query</th>"); -// w.write("<th>bop</th>"); - /* - * Columns for each pipeline operator. - */ - w.write("<th>evalOrder</th>"); // [0..n-1] - w.write("<th>evalContext</th>"); - w.write("<th>controller</th>"); - w.write("<th>bopId</th>"); - w.write("<th>predId</th>"); - w.write("<th>bopSummary</th>"); - w.write("<th>predSummary</th>"); - // metadata considered by the static optimizer. - w.write("<th>staticBestKeyOrder</th>"); // original key order assigned - // by static optimizer. - w.write("<th>overriddenKeyOrder</th>"); // explicit key order override. 
- w.write("<th>nvars</th>"); // #of variables in the predicate for a join. - w.write("<th>fastRangeCount</th>"); // fast range count used by the - // static optimizer. - // dynamics (aggregated for totals as well). - w.write("<th>runState</th>"); - w.write("<th>sumMillis</th>"); // cumulative milliseconds for eval of this operator. - w.write("<th>opCount</th>"); // cumulative #of invocations of tasks for this operator. - w.write("<th>numRunning</th>"); // #of concurrent invocations of the operator (current value) - w.write("<th>fanOut</th>"); // #of shards/nodes on which the operator has started. - w.write("<th>queueShards</th>"); // #of shards with work queued for this operator. - w.write("<th>queueChunks</th>"); // #of chunks queued for this operator. - w.write("<th>queueSolutions</th>"); // #of solutions queued for this operator. - w.write("<th>chunksIn</th>"); - w.write("<th>unitsIn</th>"); - w.write("<th>unitsInPerChunk</th>"); // average #of solutions in per chunk. - w.write("<th>chunksOut</th>"); - w.write("<th>unitsOut</th>"); - w.write("<th>unitsOutPerChunk</th>"); // average #of solutions out per chunk. - w.write("<th>mutationCount</th>"); - w.write("<th>typeErrors</th>"); - w.write("<th>joinRatio</th>"); // expansion rate multiplier in the solution count. - w.write("<th>accessPathDups</th>"); - w.write("<th>accessPathCount</th>"); - w.write("<th>accessPathRangeCount</th>"); - w.write("<th>accessPathChunksIn</th>"); - w.write("<th>accessPathUnitsIn</th>"); - // dynamics based on elapsed wall clock time. - w.write("<th>");w.write(cdata("solutions/ms"));w.write("</th>"); - w.write("<th>");w.write(cdata("mutations/ms"));w.write("</th>"); - // - // cost model(s) - // - w.write("</tr\n>"); - - } - - /** - * Write the table rows. - * - * @param queryStr - * The query text (optional). - * @param q - * The {@link IRunningQuery}. - * @param w - * Where to write the rows. - * @param maxBopLength - * The maximum length to display from {@link BOp#toString()} and - * ZERO (0) to display everything. Data longer than this value - * will be accessible from a flyover, but not directly visible in - * the page. - * - * @throws IOException - */ - public static void getTableRowsXHTML(final String queryStr, - final IRunningQuery q, final Writer w, - final Map<Integer/* bopId */, QueueStats> queueStats, - final int maxBopLength) - throws IOException { - - final Integer[] order = BOpUtility.getEvaluationOrder(q.getQuery()); - - int orderIndex = 0; - - for (Integer bopId : order) { - - getTableRowXHTML(queryStr, q, w, orderIndex, bopId, - false/* summary */, queueStats, maxBopLength); - - orderIndex++; - - } - - } - - /** - * Return a tabular representation of the query {@link RunState}. - * - * @param queryStr - * The query text (optional). - * @param q - * The {@link IRunningQuery}. - * @param evalOrder - * The evaluation order for the operator. - * @param bopId - * The identifier for the operator. - * @param summary - * <code>true</code> iff the summary for the query should be - * written. - * @param maxBopLength - * The maximum length to display from {@link BOp#toString()} and - * ZERO (0) to display everything. Data longer than this value - * will be accessible from a flyover, but not directly visible - * in the page. - * - * @return The row of the table. 
- */ - static private void getTableRowXHTML(final String queryStr, - final IRunningQuery q, final Writer w, final int evalOrder, - final Integer bopId, final boolean summary, - final Map<Integer/* bopId */, QueueStats> queueStats, - final int maxBopLength) - throws IOException { - - final DateFormat dateFormat = DateFormat.getDateTimeInstance( - DateFormat.FULL, DateFormat.FULL); - - // The elapsed time for the query (wall time in milliseconds). - final long elapsed = q.getElapsed(); - - // The serviceId on which the query is running : null unless scale-out. - final UUID serviceId = q.getQueryEngine().getServiceUUID(); - - // The thrown cause : null unless the query was terminated abnormally. - final Throwable cause = q.getCause(); - - w.write("<tr\n>"); - w.write(TD + cdata(q.getQueryId().toString()) + TDx); -// w.write(TD -// + cdata(q.getQuery().getProperty(QueryHints.TAG, -// QueryHints.DEFAULT_TAG)) + TDx); - w.write(TD + dateFormat.format(new Date(q.getStartTime())) + TDx); - w.write(TD + cdata(dateFormat.format(new Date(q.getDoneTime()))) + TDx); - w.write(TD); - if (q.getDeadline() != Long.MAX_VALUE) - w.write(cdata(dateFormat.format(new Date(q.getDeadline())))); - w.write(TDx); - w.write(TD + cdata(Long.toString(elapsed)) + TDx); - w.write(TD); w.write(cdata(serviceId == null ? NA : serviceId.toString()));w.write(TDx); - w.write(TD); - if (cause != null) - w.write(cause.getLocalizedMessage()); - w.write(TDx); - - final Map<Integer, BOp> bopIndex = q.getBOpIndex(); - final Map<Integer, BOpStats> statsMap = q.getStats(); - final BOp bop = bopIndex.get(bopId); - - // the operator. - if (summary) { -// // The query string (SPARQL). -// w.write(TD); -// w.write(queryStr == null ? cdata(NA) : prettyPrintSparql(queryStr)); -// w.write(TDx); -// // The query plan (BOPs) -// { -// w.write(TD); -// final String bopStr = BOpUtility.toString(q.getQuery()); -// if (maxBopLength == 0 || bopStr.length() <= maxBopLength) { -// // The entire query plan. -// w.write(cdata(bopStr)); -// } else { -// // A slice of the query plan. -// w.write("<a href=\"#\" title=\""); -// w.write(attrib(bopStr));// the entire query as a tooltip. -// w.write("\"\n>"); -// w.write(cdata(bopStr.substring(0/* begin */, Math.min( -// maxBopLength, bopStr.length())))); -// w.write("..."); -// w.write("</a>"); -// } -// w.write(TDx); -// } - w.write(TD); - w.write("total"); // evalOrder - w.write(TDx); - w.write(TD); w.write(TDx); // evalContext - w.write(TD); w.write(TDx); // controller? - w.write(TD); - w.write("total"); // bopId - w.write(TDx); - } else { -// // The query string (SPARQL). -// w.write(TD); -// w.write("...");// elide the original query string on a detail row. -// w.write(TDx); -// // The query plan (BOPs) -// { -// w.write(TD); -// final String bopStr = bopIndex.get(bopId).toString(); -// if (maxBopLength == 0 || bopStr.length() <= maxBopLength) { -// // The entire query plan. -// w.write(cdata(bopStr)); -// } else { -// // A slice of the query plan. -// w.write("<a href=\"#\" title=\""); -// w.write(attrib(bopStr));// the entire query as a tooltip. -// w.write("\"\n>"); -// // A slice of the query inline on the page. -// w.write(cdata(bopStr.substring(0/* begin */, Math.min( -// maxBopLength, bopStr.length())))); -// w.write("..."); -// w.write("</a>"); -// } -// w.write(TDx); -// } - w.write(TD); - w.write(Integer.toString(evalOrder)); // eval order for this bop. 
- w.write(TDx); - w.write(TD); - w.write(cdata(bop.getEvaluationContext().toString())); - w.write(TDx); - w.write(TD); - w.write(cdata(bop.getProperty(BOp.Annotations.CONTROLLER, - BOp.Annotations.DEFAULT_CONTROLLER).toString())); - w.write(TDx); - w.write(TD); - w.write(Integer.toString(bopId)); - w.write(TDx); - } - - @SuppressWarnings("rawtypes") - final IPredicate pred = (IPredicate<?>) bop - .getProperty(PipelineJoin.Annotations.PREDICATE); - final Integer predId = pred == null ? null : (Integer) pred - .getProperty(BOp.Annotations.BOP_ID); - w.write(TD); - if (predId != null) { - w.write(cdata(predId.toString())); - } else { - if (pred != null) { - // Expected but missing. - w.write(cdata(NA)); - } - } - w.write(TDx); - - w.write(TD); - if(summary) { - w.write("total"); - } else { - w.write(cdata(bop.getClass().getSimpleName())); - w.write(cdata("[" + bopId + "]")); - } - w.write(TDx); - - w.write(TD); - if (pred != null) { - w.write(cdata(pred.getClass().getSimpleName())); - w.write(cdata("[" + predId + "](")); - final Iterator<BOp> itr = pred.argIterator(); - boolean first = true; - while (itr.hasNext()) { - if (first) { - first = false; - } else - w.write(cdata(", ")); - final IVariableOrConstant<?> x = (IVariableOrConstant<?>) itr - .next(); - if (x.isVar()) { - w.write(cdata("?")); - w.write(cdata(x.getName())); - } else { - w.write(cdata(x.get().toString())); - //sb.append(((IV)x.get()).getValue()); - } - } - w.write(cdata(")")); - } - if (bop.getProperty(NamedSetAnnotations.NAMED_SET_REF) != null) { - /* - * Named Solution Set(s) summary. - */ - final Object namedSetRef = bop - .getProperty(NamedSetAnnotations.NAMED_SET_REF); - if (namedSetRef instanceof INamedSolutionSetRef) { - final INamedSolutionSetRef ref = (INamedSolutionSetRef) namedSetRef; - final IRunningQuery t = getRunningQuery(q, ref.getQueryId()); - if (t != null) { - final IQueryAttributes attrs = t == null ? null : t - .getAttributes(); - final IHashJoinUtility state = (IHashJoinUtility) (attrs == null ? null - : attrs.get(ref)); - if (state != null) { - // Prefer the IHashUtilityState - w.write(cdata(state.toString())); - w.write(cdata(",namedSet=")); - w.write(cdata(ref.getLocalName())); - } else { - // Otherwise the NamedSolutionSetRef - w.write(cdata(ref.toString())); - } - // w.write(cdata(", joinvars=" + - // Arrays.toString(ref.joinVars))); - } - } else { - final INamedSolutionSetRef[] refs = (INamedSolutionSetRef[]) namedSetRef; - for (int i = 0; i < refs.length; i++) { - final INamedSolutionSetRef ref = refs[i]; - if (i > 0) - w.write(cdata(",")); - final IRunningQuery t = getRunningQuery(q, ref.getQueryId()); - if (t != null) { - final IQueryAttributes attrs = t == null ? null : t - .getAttributes(); - final IHashJoinUtility state = (IHashJoinUtility) (attrs == null ? null - : attrs.get(ref)); - if (state != null) { - // Prefer the IHashUtilityState - w.write(cdata(state.toString())); - } else { - // Otherwise the NamedSolutionSetRef - w.write(cdata(ref.toString())); - } - } - // w.write(cdata(", joinvars=" + - // Arrays.toString(refs[0].joinVars))); - } - } - } - if (bop instanceof ChunkedMaterializationOp) { - final IVariable<?>[] vars = (IVariable<?>[]) bop - .getProperty(ChunkedMaterializationOp.Annotations.VARS); - w.write(cdata(Arrays.toString(vars))); - } - w.write(TDx); - - /* - * Static optimizer metadata. 
- * - * FIXME Should report [nvars] be the expected asBound #of variables - * given the assigned evaluation order and the expectation of propagated - * bindings (optionals may leave some unbound). - */ - { - - if (pred != null) { - - // Static optimizer key order (if run). - final IKeyOrder<?> keyOrder = (IKeyOrder<?>) pred - .getProperty(AST2BOpJoins.Annotations.ORIGINAL_INDEX); - - // Explicit override of the key order (if given). - final Object overrideKeyOrder = pred - .getProperty(IPredicate.Annotations.KEY_ORDER); - - final Long rangeCount = (Long) pred - .getProperty(AST2BOpJoins.Annotations.ESTIMATED_CARDINALITY); - - // keyorder - w.write(TD); - if (keyOrder != null) - w.write(keyOrder.toString()); - w.write(TDx); - - // keyorder - w.write(TD); - if (overrideKeyOrder != null) - w.write(overrideKeyOrder.toString()); - w.write(TDx); - - // nvars - w.write(TD); - if (keyOrder != null) - w.write(Integer.toString(pred.getVariableCount(keyOrder))); - w.write(TDx); - - // rangeCount - w.write(TD); - if (rangeCount != null) - w.write(Long.toString(rangeCount)); - w.write(TDx); - - } else { - // keyorder (static) - w.write(TD); - w.write(TDx); - // keyorder (override) - w.write(TD); - w.write(TDx); - // nvars - w.write(TD); - w.write(TDx); - // rangeCount - w.write(TD); - w.write(TDx); - } - } - - /* - * Dynamics. - */ - - final int fanOut = ((AbstractRunningQuery) q).getStartedOnCount(bopId); - - final long numRunning = ((AbstractRunningQuery) q) - .getRunningCount(bopId); - - final PipelineJoinStats stats = new PipelineJoinStats(); - if(summary) { - // Aggregate the statistics for all pipeline operators. - for (BOpStats t : statsMap.values()) { - stats.add(t); - } - } else { - // Just this operator. - final BOpStats tmp = statsMap.get(bopId); - if (tmp != null) - stats.add(tmp); - } - final long unitsIn = stats.unitsIn.get(); - final long unitsOut = stats.unitsOut.get(); - - w.write(TD); - if (bop != null) { -// if (stats.opCount.get() == 0) -// w.write(cdata("NotStarted")); -// else - // Note: This requires a lock! -// final RunStateEnum runState = ((AbstractRunningQuery) q) -// .getRunState(bopId); - // Note: Barges in if possible, but does not wait for a lock. - final RunStateEnum runState = ((AbstractRunningQuery) q) - .tryGetRunState(bopId); - w.write(cdata(runState == null ? NA : runState.name())); - } else { - w.write(cdata(NA)); - } - w.write(TDx); - - w.write(TD); - w.write(Long.toString(stats.elapsed.get())); - w.write(TDx); - w.write(TD); - w.write(Long.toString(stats.opCount.get())); - w.write(TDx); - w.write(TD); - w.write(Long.toString(numRunning)); - w.write(TDx); - w.write(TD); - w.write(Integer.toString(fanOut)); - w.write(TDx); - { - final QueueStats tmp = queueStats == null ? 
null : queueStats - .get(bopId); - if (tmp != null) { - w.write(TD); - w.write(Long.toString(tmp.shardSet.size())); - w.write(TDx); - w.write(TD); - w.write(Long.toString(tmp.chunkCount)); - w.write(TDx); - w.write(TD); - w.write(Long.toString(tmp.solutionCount)); - w.write(TDx); - } else { - w.write(TD); - w.write(TDx); - w.write(TD); - w.write(TDx); - w.write(TD); - w.write(TDx); - } - } - w.write(TD); - w.write(Long.toString(stats.chunksIn.get())); - w.write(TDx); - w.write(TD); - w.write(Long.toString(stats.unitsIn.get())); - w.write(TDx); - w.write(TD); - w.write(Double.toString(avg(stats.unitsIn.get(), stats.chunksIn.get()))); - w.write(TDx); - w.write(TD); - w.write(Long.toString(stats.chunksOut.get())); - w.write(TDx); - w.write(TD); - w.write(Long.toString(stats.unitsOut.get())); - w.write(TDx); - w.write(TD); - w.write(Double.toString(avg(stats.unitsOut.get(), stats.chunksOut.get()))); - w.write(TDx); - w.write(TD); - w.write(cdata(Long.toString(stats.mutationCount.get()))); - w.write(TDx); - w.write(TD); - w.write(Long.toString(stats.typeErrors.get())); - w.write(TDx); - w.write(TD); - w.write(cdata(unitsIn == 0 ? NA : Double.toString(unitsOut / (double) unitsIn))); - w.write(TDx); - w.write(TD); - w.write(Long.toString(stats.accessPathDups.get())); - w.write(TDx); - w.write(TD); - w.write(Long.toString(stats.accessPathCount.get())); - w.write(TDx); - w.write(TD); - w.write(Long.toString(stats.accessPathRangeCount.get())); - w.write(TDx); - w.write(TD); - w.write(Long.toString(stats.accessPathChunksIn.get())); - w.write(TDx); - w.write(TD); - w.write(Long.toString(stats.accessPathUnitsIn.get())); - w.write(TDx); - - /* - * Use the total elapsed time for the query (wall time). - */ - // solutions/ms - { - w.write(TD); -// final long solutionCount = stats.unitsOut.get(); -// final String solutionsPerSec = (solutionCount == 0 ? NA // -// : (elapsed == 0L ? DZ // -// : "" + (long) (solutionCount * 1000d / elapsed))); - w.write(cdata(elapsed == 0 ? "0" : Long.toString(stats.unitsOut - .get() / elapsed))); - w.write(TDx); - } - // mutations/ms - { - w.write(TD); - w.write(cdata(elapsed == 0 ? "0" : Long - .toString(stats.mutationCount.get() / elapsed))); - w.write(TDx); - } - w.write("</tr\n>"); - - } - - /** - * Write a summary row for the query. The table element, header, and footer - * must be written separately. - * - * @param queryStr - * The original query text (optional). - * @param q - * The {@link IRunningQuery}. - * @param w - * Where to write the data. - * @param maxBopLength - * The maximum length to display from {@link BOp#toString()} and - * ZERO (0) to display everything. Data longer than this value - * will be accessible from a flyover, but not directly visible in - * the page. 
- * @throws IOException - */ - static private void getSummaryRowXHTML(final String queryStr, - final IRunningQuery q, final Writer w, - final Map<Integer/* bopId */, QueueStats> queueStats, - final int maxBopLength) throws IOException { - - getTableRowXHTML(queryStr, q, w, -1/* orderIndex */, q.getQuery() - .getId(), true/* summary */, queueStats, maxBopLength); - - } - - private static String cdata(String s) { - - return XHTMLRenderer.cdata(s); - - } - - private static String attrib(String s) { - - return XHTMLRenderer.attrib(s); - - } - -// private static String prettyPrintSparql(String s) { -// -//// return cdata(s); -//// -//// } -// -// s = s.replace("\n", " "); -// -// s = s.replace("PREFIX", "\nPREFIX"); -// s = s.replace("select", "\nselect"); -// s = s.replace("where", "\nwhere"); -// s = s.replace("{","{\n"); -// s = s.replace("}","\n}"); -// s = s.replace(" ."," .\n"); // TODO Must not match within quotes (literals) or <> (URIs). -//// s = s.replace("||","||\n"); -//// s = s.replace("&&","&&\n"); -// -// s = cdata(s); -// -// s = s.replace("\n", "<br>"); -// -//// return "<pre>"+s+"</pre>"; -// -// return s; -// -// } - - /** - * Return <code>x/y</code> unless <code>y:=0</code>, in which case return - * ZERO (0). - * - * @param x - * The numerator. - * @param y - * The denomerator. - * - * @return The average. - */ - static private double avg(final long x, final long y) { - - if (y == 0) - return 0d; - - return x / (double) y; - - } - - /** - * Return the {@link IRunningQuery} for that queryId iff it is available. - * - * @param q - * The query that you already have. - * @param queryId - * The {@link UUID} of the desired query. - * - * @return The {@link IRunningQuery} iff it can be found and otherwise - * <code>null</code>. - */ - static private IRunningQuery getRunningQuery(final IRunningQuery q, - final UUID queryId) { - - if (q.getQueryId().equals(queryId)) { - - /* - * Avoid lookup perils if we already have the right query. - */ - - return q; - - } - - try { - - return q.getQueryEngine().getRunningQuery(queryId); - - } catch (RuntimeException t) { - - // Done and gone. - return null; - - } - - } - -} +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Jun 22, 2009 + */ + +package com.bigdata.bop.engine; + +import java.io.IOException; +import java.io.Writer; +import java.text.DateFormat; +import java.util.Arrays; +import java.util.Date; +import java.util.Iterator; +import java.util.Map; +import java.util.UUID; + +import org.apache.log4j.Logger; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpUtility; +import com.bigdata.bop.IPredicate; +import com.bigdata.bop.IQueryAttributes; +import com.bigdata.bop.IVariable; +import com.bigdata.bop.IVariableOrConstant; +import com.bigdata.bop.controller.INamedSolutionSetRef; +import com.bigdata.bop.controller.NamedSetAnnotations; +import com.bigdata.bop.engine.RunState.RunStateEnum; +import com.bigdata.bop.join.IHashJoinUtility; +import com.bigdata.bop.join.PipelineJoin; +import com.bigdata.bop.join.PipelineJoinStats; +import com.bigdata.bop.rdf.join.ChunkedMaterializationOp; +import com.bigdata.counters.render.XHTMLRenderer; +import com.bigdata.rawstore.Bytes; +import com.bigdata.rdf.sparql.ast.eval.AST2BOpJoins; +import com.bigdata.striterator.IKeyOrder; + +/** + * Class defines the log on which summary operator execution statistics are + * written. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: RuleLog.java 3448 2010-08-18 20:55:58Z thompsonbry $ + */ +public class QueryLog { + + private static final String NA = "N/A"; + private static final String TD = "<td>"; + private static final String TDx = "</td\n>"; +// // the symbol used when a count is zero. +// private static final String ZE = "0"; +// +//// // the symbol used when a count was zero, so count/sec is also zero. +//// final String NA = "0"; +// +// // the symbol used when the elapsed time was zero, so count/sec is divide by zero. +// private static final String DZ = "0"; + + protected static final transient Logger log = Logger + .getLogger(QueryLog.class); + + static { + logTableHeader(); + } + + static public void logTableHeader() { + if(log.isInfoEnabled()) + log.info(QueryLog.getTableHeader()); + } + + /** + * A single buffer is reused to keep down the heap churn. + */ + final private static StringBuilder sb = new StringBuilder( + Bytes.kilobyte32 * 4); + + /** + * Log rule execution statistics. + * + * @param q + * The running query. + */ + static public void log(final IRunningQuery q) { + + if (log.isInfoEnabled()) { + + try { + + final IRunningQuery[] children = (q instanceof AbstractRunningQuery) ? ((AbstractRunningQuery) q) + .getChildren() : null; + + /* + * Note: We could use a striped lock here over a small pool of + * StringBuilder's to decrease contention for the single buffer + * while still avoiding heap churn for buffer allocation. Do + * this if the monitor for this StringBuilder shows up as a hot + * spot when query logging is enabled. + */ + synchronized (sb) { + + // clear the buffer. 
+ sb.setLength(0); + + { + final Map<Integer/* bopId */, QueueStats> queueStats = ((ChunkedRunningQuery) q) + .getQueueStats(); + + logSummaryRow(q, queueStats, sb); + + logDetailRows(q, queueStats, sb); + } + + if (children != null) { + + for (int i = 0; i < children.length; i++) { + + final IRunningQuery c = children[i]; + + final Map<Integer/* bopId */, QueueStats> queueStats = ((ChunkedRunningQuery) c) + .getQueueStats(); + + logSummaryRow(c, queueStats, sb); + + logDetailRows(c, queueStats, sb); + + } + + } + + log.info(sb); + + } + + } catch (RuntimeException t) { + + log.error(t,t); + + } + + } + + } + +// /** +// * Log the query. +// * +// * @param q +// * The query. +// * @param sb +// * Where to write the log message. +// */ +// static public void log(final boolean includeTableHeader, +// final IRunningQuery q, final StringBuilder sb) { +// +// if(includeTableHeader) { +// +// sb.append(getTableHeader()); +// +// } +// +// logDetailRows(q, sb); +// +// logSummaryRow(q, sb); +// +// } + + /** + * Log a detail row for each operator in the query. + */ + static private void logDetailRows(final IRunningQuery q, + final Map<Integer/* bopId */, QueueStats> queueStats, + final StringBuilder sb) { + + final Integer[] order = BOpUtility.getEvaluationOrder(q.getQuery()); + + int orderIndex = 0; + + for (Integer bopId : order) { + + sb.append(getTableRow(q, orderIndex, bopId, false/* summary */, + queueStats)); + +// sb.append('\n'); + + orderIndex++; + + } + + } + + /** + * Log a summary row for the query. + */ + static private void logSummaryRow(final IRunningQuery q, + final Map<Integer/* bopId */, QueueStats> queueStats, + final StringBuilder sb) { + + sb.append(getTableRow(q, -1/* orderIndex */, q.getQuery().getId(), + true/* summary */, queueStats)); + +// sb.append('\n'); + + } + + static private String getTableHeader() { + + final StringBuilder sb = new StringBuilder(); + + /* + * Common columns for the overall query and for each pipeline operator. + */ + sb.append("queryId"); +// sb.append("\ttag"); + sb.append("\tbeginTime"); + sb.append("\tdoneTime"); + sb.append("\tdeadline"); + sb.append("\telapsed"); + sb.append("\tserviceId"); + sb.append("\tcause"); + sb.append("\tbop"); + /* + * Columns for each pipeline operator. + */ + sb.append("\tevalOrder"); // [0..n-1] + sb.append("\tevalContext"); + sb.append("\tcontroller"); + sb.append("\tbopId"); + sb.append("\tpredId"); + sb.append("\tbopSummary"); // short form of the bop. + sb.append("\tpredSummary"); // short form of the pred. + // metadata considered by the static optimizer. + sb.append("\tstaticBestKeyOrder"); // original key order assigned by static optimizer. + sb.append("\toverrideKeyOrder"); // key order iff explicitly overridden. + sb.append("\tnvars"); // #of variables in the predicate for a join. + sb.append("\tfastRangeCount"); // fast range count used by the static optimizer. + // dynamics (aggregated for totals as well). + sb.append("\trunState"); // true iff the operator will not be evaluated again. + sb.append("\tsumMillis"); // cumulative milliseconds for eval of this operator. + sb.append("\topCount"); // cumulative #of invocations of tasks for this operator. + sb.append("\tnumRunning");// #of concurrent invocations of the operator (current value) + ... [truncated message content] |
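[Editor's note] Per the r7689 log message, a plain explain request now renders the reduced column set, while explain=details restores all of the available detail. A hedged sketch of exercising the new option over HTTP, assuming the stock NanoSparqlServer endpoint URL (the explain=details parameter itself is the option introduced in this revision):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;
    import java.net.URLEncoder;

    public class ExplainDetailsClient {
        public static void main(final String[] args) throws Exception {
            // Endpoint URL is an assumption (the NanoSparqlServer default).
            final String endpoint = "http://localhost:9999/bigdata/sparql";
            final String query = URLEncoder.encode(
                    "SELECT * WHERE { ?s ?p ?o } LIMIT 10", "UTF-8");
            final URL url = new URL(endpoint + "?query=" + query
                    + "&explain=details");
            final BufferedReader in = new BufferedReader(new InputStreamReader(
                    url.openStream(), "UTF-8"));
            try {
                String line;
                while ((line = in.readLine()) != null) {
                    System.out.println(line); // XHTML statistics page
                }
            } finally {
                in.close();
            }
        }
    }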
From: <tho...@us...> - 2013-12-20 13:18:02
Revision: 7688 http://bigdata.svn.sourceforge.net/bigdata/?rev=7688&view=rev Author: thompsonbry Date: 2013-12-20 13:17:53 +0000 (Fri, 20 Dec 2013) Log Message: ----------- Disabling the test of RMI interrupt behavior pending a response from the river community. It appears that the RMI on the remote service is not interrupted when the local thread executing the RMI is interrupted. To me, this seems like an RMI bug. Modified Paths: -------------- branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java Modified: branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java =================================================================== --- branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java 2013-12-19 22:11:12 UTC (rev 7687) +++ branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java 2013-12-20 13:17:53 UTC (rev 7688) @@ -40,6 +40,7 @@ import net.jini.config.Configuration; import net.jini.core.lookup.ServiceID; +import com.bigdata.BigdataStatics; import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAStatusEnum; import com.bigdata.ha.IndexManagerCallable; @@ -372,11 +373,19 @@ * This unit test setups up a service and then issues an RMI that invokes a * {@link Thread#sleep(long)} method on the service. The thread that issues * the RMI is then interrupted during the sleep. - * - * @throws Exception */ public void test_interruptRMI() throws Exception { + if(!BigdataStatics.runKnownBadTests) { + /** + * FIXME TEST DISABLED. I have written to the river mailing list + * about this test. I am not observing the interrupt of the + * Thread.sleep() on the remote service. I need to figure out if + * that is the expected behavior or if this is an RMI bug. + */ + return; + } + // Start a service. final HAGlue serverA = startA(); @@ -498,10 +507,11 @@ log.warn("Will sleep: millis=" + millis); try { Thread.sleep(millis); + log.warn("Sleep finished normally."); } catch (Throwable t) { + log.error("Exception during sleep: "+t, t); ((HAJournalTest) getIndexManager()).getRemoteImpl() .setLastRootCause(t); - log.error(t, t); throw new RuntimeException(t); } finally { log.warn("Did sleep: millis=" + millis); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
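[Editor's note] The behavior r7688 is probing: interrupting the local thread that is blocked in the remote call does not propagate to the code executing on the service. A small standalone analogy using a local executor rather than real RMI (names and timings are invented; Future.get() here merely stands in for the blocking RMI call):

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class InterruptDoesNotPropagateDemo {
        public static void main(final String[] args) throws Exception {
            final ExecutorService remoteSide = Executors.newSingleThreadExecutor();
            // Stands in for the service-side Thread.sleep() in the disabled test.
            final Future<?> remoteTask = remoteSide.submit(new Runnable() {
                public void run() {
                    try {
                        Thread.sleep(5000);
                        System.out.println("remote side: sleep finished normally");
                    } catch (InterruptedException e) {
                        System.out.println("remote side: sleep was interrupted");
                    }
                }
            });
            // Stands in for the local thread blocked in the RMI.
            final Thread caller = new Thread(new Runnable() {
                public void run() {
                    try {
                        remoteTask.get();
                    } catch (InterruptedException e) {
                        System.out.println("caller: local wait interrupted");
                    } catch (ExecutionException e) {
                        System.out.println("caller: remote task failed: " + e);
                    }
                }
            });
            caller.start();
            Thread.sleep(200);
            caller.interrupt(); // only the local wait is interrupted
            caller.join();
            remoteTask.get();   // the "remote" sleep still completes normally
            remoteSide.shutdown();
        }
    }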
From: <tho...@us...> - 2013-12-19 22:11:18
Revision: 7687 http://bigdata.svn.sourceforge.net/bigdata/?rev=7687&view=rev Author: thompsonbry Date: 2013-12-19 22:11:12 +0000 (Thu, 19 Dec 2013) Log Message: ----------- removed java 7 specific code in test suite./ Modified Paths: -------------- branches/MGC_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java Modified: branches/MGC_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java 2013-12-19 22:09:49 UTC (rev 7686) +++ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java 2013-12-19 22:11:12 UTC (rev 7687) @@ -28,7 +28,7 @@ import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; -import java.net.SocketOption; +//import java.net.SocketOption; import java.nio.ByteBuffer; import java.nio.channels.ClosedChannelException; import java.nio.channels.ServerSocketChannel; @@ -76,74 +76,75 @@ super(name); } - /** - * Writes out the available options for the client and server socket. - * - * @throws IOException - */ - public void testDirectSockets_options() throws IOException { - - // Get a socket addresss for an unused port. - final InetSocketAddress serverAddr = new InetSocketAddress(getPort(0)); - - // First our ServerSocketChannel - final ServerSocketChannel ssc = ServerSocketChannel.open(); - try { - - // bind the ServerSocket to the specified port. - ssc.bind(serverAddr); - - // Now the first Client SocketChannel - final SocketChannel cs = SocketChannel.open(); - try { - /* - * Note: true if connection made. false if connection in - * progress. - */ - final boolean immediate = cs.connect(serverAddr); - if (!immediate) { - // Did not connect immediately, so finish connect now. - if (!cs.finishConnect()) { - fail("Did not connect."); - } - } - - /* - * Write out the client socket options. - */ - log.info("Client:: isOpen=" + cs.isOpen()); - log.info("Client:: isBlocking=" + cs.isBlocking()); - log.info("Client:: isRegistered=" + cs.isRegistered()); - log.info("Client:: isConnected=" + cs.isConnected()); - log.info("Client:: isConnectionPending=" - + cs.isConnectionPending()); - for (SocketOption<?> opt : cs.supportedOptions()) { - log.info("Client:: " + opt + " := " + cs.getOption(opt)); - } - - /* - * Note: We need to use ServerSocketChannel.open() to get access - * to the stream oriented listening interface for the server - * side of the socket. - */ - log.info("Server:: isOpen=" + ssc.isOpen()); - log.info("Server:: isBlocking=" + ssc.isBlocking()); - log.info("Server:: isRegistered=" + ssc.isRegistered()); - for (SocketOption<?> opt : ssc.supportedOptions()) { - log.info("Server:: " + opt + " := " + cs.getOption(opt)); - } - - } finally { - cs.close(); - } - - } finally { - - ssc.close(); - - } - - } + // FIXME RESTORE WHEN WE MOVE TO JAVA 7. +// /** +// * Writes out the available options for the client and server socket. +// * +// * @throws IOException +// */ +// public void testDirectSockets_options() throws IOException { +// +// // Get a socket addresss for an unused port. +// final InetSocketAddress serverAddr = new InetSocketAddress(getPort(0)); +// +// // First our ServerSocketChannel +// final ServerSocketChannel ssc = ServerSocketChannel.open(); +// try { +// +// // bind the ServerSocket to the specified port. 
+// ssc.bind(serverAddr); +// +// // Now the first Client SocketChannel +// final SocketChannel cs = SocketChannel.open(); +// try { +// /* +// * Note: true if connection made. false if connection in +// * progress. +// */ +// final boolean immediate = cs.connect(serverAddr); +// if (!immediate) { +// // Did not connect immediately, so finish connect now. +// if (!cs.finishConnect()) { +// fail("Did not connect."); +// } +// } +// +// /* +// * Write out the client socket options. +// */ +// log.info("Client:: isOpen=" + cs.isOpen()); +// log.info("Client:: isBlocking=" + cs.isBlocking()); +// log.info("Client:: isRegistered=" + cs.isRegistered()); +// log.info("Client:: isConnected=" + cs.isConnected()); +// log.info("Client:: isConnectionPending=" +// + cs.isConnectionPending()); +// for (SocketOption<?> opt : cs.supportedOptions()) { +// log.info("Client:: " + opt + " := " + cs.getOption(opt)); +// } +// +// /* +// * Note: We need to use ServerSocketChannel.open() to get access +// * to the stream oriented listening interface for the server +// * side of the socket. +// */ +// log.info("Server:: isOpen=" + ssc.isOpen()); +// log.info("Server:: isBlocking=" + ssc.isBlocking()); +// log.info("Server:: isRegistered=" + ssc.isRegistered()); +// for (SocketOption<?> opt : ssc.supportedOptions()) { +// log.info("Server:: " + opt + " := " + cs.getOption(opt)); +// } +// +// } finally { +// cs.close(); +// } +// +// } finally { +// +// ssc.close(); +// +// } +// +// } /** * Simple test of connecting to a server socket and the failure to connect This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
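Until the Java 7 SocketOption / supportedOptions() enumeration above can be restored, roughly the same diagnostics are available on Java 6 through the per-option getters that java.net.Socket and java.net.ServerSocket have carried since Java 1.4, reached from the channels via cs.socket() and ssc.socket(). A sketch under those assumptions (the helper class name is hypothetical, and it logs a fixed list of well-known options rather than enumerating whatever the platform supports):

{{{
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;

/** Hypothetical helper: Java 6 friendly replacement for supportedOptions(). */
public class SocketOptionLogger {

    /** Log the client-side options (pass socketChannel.socket()). */
    public static void logClient(final Socket s) throws IOException {
        System.out.println("Client:: TCP_NODELAY  := " + s.getTcpNoDelay());
        System.out.println("Client:: SO_KEEPALIVE := " + s.getKeepAlive());
        System.out.println("Client:: SO_REUSEADDR := " + s.getReuseAddress());
        System.out.println("Client:: SO_LINGER    := " + s.getSoLinger());
        System.out.println("Client:: SO_TIMEOUT   := " + s.getSoTimeout());
        System.out.println("Client:: SO_SNDBUF    := " + s.getSendBufferSize());
        System.out.println("Client:: SO_RCVBUF    := " + s.getReceiveBufferSize());
        System.out.println("Client:: OOBINLINE    := " + s.getOOBInline());
    }

    /** Log the server-side options (pass serverSocketChannel.socket()). */
    public static void logServer(final ServerSocket ss) throws IOException {
        System.out.println("Server:: SO_REUSEADDR := " + ss.getReuseAddress());
        System.out.println("Server:: SO_TIMEOUT   := " + ss.getSoTimeout());
        System.out.println("Server:: SO_RCVBUF    := " + ss.getReceiveBufferSize());
    }
}
}}}

The same portability caveat applies to the commented-out ssc.bind(serverAddr): ServerSocketChannel.bind() is also a Java 7 (NetworkChannel) method; the Java 6 spelling is ssc.socket().bind(serverAddr).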
From: <tho...@us...> - 2013-12-19 22:09:55
Revision: 7686 http://bigdata.svn.sourceforge.net/bigdata/?rev=7686&view=rev Author: thompsonbry Date: 2013-12-19 22:09:49 +0000 (Thu, 19 Dec 2013) Log Message: ----------- Modified Name2Addr to report ALL exceptions if ANY of the per-dirty-index checkpoint tasks fails. Modified Paths: -------------- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2013-12-19 21:34:30 UTC (rev 7685) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2013-12-19 22:09:49 UTC (rev 7686) @@ -36,6 +36,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Locale; import java.util.Properties; @@ -80,6 +81,7 @@ import com.bigdata.rawstore.IRawStore; import com.bigdata.resources.IndexManager; import com.bigdata.resources.ResourceManager; +import com.bigdata.util.concurrent.ExecutionExceptions; import com.ibm.icu.text.Collator; import cutthecrap.utils.striterators.Filter; @@ -738,6 +740,7 @@ } // for each entry in the snapshot of the commit list. + final List<Throwable> causes = new LinkedList<Throwable>(); for (Future<CommitIndexTask> f : futures) { try { @@ -775,11 +778,13 @@ } catch (InterruptedException e) { - throw new RuntimeException(e); + log.error("l.name: " + e, e); + causes.add(e); } catch (ExecutionException e) { - throw new RuntimeException(e); + log.error("l.name: " + e, e); + causes.add(e); } @@ -842,6 +847,17 @@ // // set commitTime on the btree (transient field). // l.btree.setLastCommitTime(commitTime); + } // next Future. + + /* + * If there were any errors, then throw an exception listing them. + */ + if (!causes.isEmpty()) { + // Throw exception back to the leader. + if (causes.size() == 1) + throw new RuntimeException(causes.get(0)); + throw new RuntimeException("nerrors=" + causes.size(), + new ExecutionExceptions(causes)); } // and flushes out this btree as well. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
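The pattern in this commit -- drain every Future, log and collect each cause, and only fail once all outcomes are known -- is useful anywhere the first error must not mask the rest. A condensed, self-contained sketch of the same idea follows; the AggregateException stand-in below is hypothetical, whereas Name2Addr itself wraps multiple causes in com.bigdata.util.concurrent.ExecutionExceptions:

{{{
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ReportAllFailures {

    /** Hypothetical stand-in for com.bigdata.util.concurrent.ExecutionExceptions. */
    public static class AggregateException extends RuntimeException {
        private static final long serialVersionUID = 1L;
        public final List<Throwable> causes;
        public AggregateException(final String msg, final List<Throwable> causes) {
            super(msg, causes.get(0)); // The first cause doubles as the "cause".
            this.causes = causes;
        }
    }

    /** Run all tasks; if any fail, throw once, reporting every cause. */
    public static <T> void runAll(final List<Callable<T>> tasks)
            throws InterruptedException {
        final ExecutorService exec = Executors.newFixedThreadPool(4);
        try {
            // invokeAll() waits for every task, even after some have failed.
            final List<Future<T>> futures = exec.invokeAll(tasks);
            final List<Throwable> causes = new LinkedList<Throwable>();
            for (Future<T> f : futures) {
                try {
                    f.get(); // Materialize the per-task outcome.
                } catch (ExecutionException e) {
                    causes.add(e.getCause()); // Record; keep draining the rest.
                }
            }
            if (!causes.isEmpty()) {
                if (causes.size() == 1)
                    throw new RuntimeException(causes.get(0));
                throw new AggregateException("nerrors=" + causes.size(), causes);
            }
        } finally {
            exec.shutdown();
        }
    }
}
}}}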
From: <tho...@us...> - 2013-12-19 21:34:42
Revision: 7685 http://bigdata.svn.sourceforge.net/bigdata/?rev=7685&view=rev Author: thompsonbry Date: 2013-12-19 21:34:30 +0000 (Thu, 19 Dec 2013) Log Message: ----------- Merging in changes from the main development branch. This will catch up the feature branch with the 1.3.0 release. Among other things, this will clear up CI quite a bit. Note: The HASendService will not use small chunks in this commit - the code is there, but it needs to be enabled by hand for debuging. At revision r7684 in MGC_1_3_0. {{{ merge https://bigdata.svn.sourceforge.net/svnroot/bigdata/branches/BIGDATA_RELEASE_1_3_0 /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC --- Merging r7607 through r7684 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotPolicy.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DefaultSnapshotPolicy.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/build.properties U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-war/src/html/index.html U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata/src/test/com/bigdata/io/TestNameAndExtensionFilter.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata/src/java/com/bigdata/quorum/QuorumActor.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata/src/releases/RELEASE_1_3_0.txt U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestReificationDoneRightParser.java --- Merging r7607 through r7684 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java --- Merging r7607 through r7684 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/PrefixDeclProcessor.java U 
/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreWithoutStatementIdentifiers.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-01a.rq U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/TestReificationDoneRightEval.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-03a.rq U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-01.ttl U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-03.ttl U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-02a.ttl U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-03a.ttl U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java Merge complete. ===== File Statistics: ===== Updated: 28 ==== Conflict Statistics: ===== File conflicts: 2 }}} For both AbstractQuorum and QuorumActor, the conflicts were resolved by using the versions from the feature branch (MGC_1_3_0). See #779 Revision Links: -------------- http://bigdata.svn.sourceforge.net/bigdata/?rev=7684&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7607&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7684&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7607&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7684&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7607&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7684&view=rev Modified Paths: -------------- branches/MGC_1_3_0/bigdata/src/releases/RELEASE_1_3_0.txt branches/MGC_1_3_0/bigdata/src/test/com/bigdata/io/TestNameAndExtensionFilter.java branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DefaultSnapshotPolicy.java branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotPolicy.java branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/TestReificationDoneRightEval.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-01.ttl branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-01a.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-02a.ttl branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-03.ttl branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-03a.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-03a.ttl 
branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreWithoutStatementIdentifiers.java branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/PrefixDeclProcessor.java branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java branches/MGC_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestReificationDoneRightParser.java branches/MGC_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java branches/MGC_1_3_0/bigdata-war/src/html/index.html branches/MGC_1_3_0/build.properties Property Changed: ---------------- branches/MGC_1_3_0/ branches/MGC_1_3_0/bigdata/lib/jetty/ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate/ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/util/ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/htree/raba/ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/jsr166/ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/util/ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/jsr166/ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/util/httpd/ branches/MGC_1_3_0/bigdata-compatibility/ branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/attr/ branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/disco/ branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/util/config/ branches/MGC_1_3_0/bigdata-perf/ branches/MGC_1_3_0/bigdata-perf/btc/ branches/MGC_1_3_0/bigdata-perf/btc/src/resources/ branches/MGC_1_3_0/bigdata-perf/lubm/ branches/MGC_1_3_0/bigdata-perf/uniprot/ branches/MGC_1_3_0/bigdata-perf/uniprot/src/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/MGC_1_3_0/bigdata-rdf/src/samples/ branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/MGC_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/MGC_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/MGC_1_3_0/dsi-utils/ branches/MGC_1_3_0/dsi-utils/LEGAL/ branches/MGC_1_3_0/dsi-utils/lib/ branches/MGC_1_3_0/dsi-utils/src/ branches/MGC_1_3_0/dsi-utils/src/java/ branches/MGC_1_3_0/dsi-utils/src/java/it/ branches/MGC_1_3_0/dsi-utils/src/java/it/unimi/ branches/MGC_1_3_0/dsi-utils/src/test/ branches/MGC_1_3_0/dsi-utils/src/test/it/unimi/ branches/MGC_1_3_0/dsi-utils/src/test/it/unimi/dsi/ branches/MGC_1_3_0/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/MGC_1_3_0/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/MGC_1_3_0/osgi/ branches/MGC_1_3_0/src/resources/bin/config/ Property changes on: branches/MGC_1_3_0 ___________________________________________________________________ Modified: svn:mergeinfo - 
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE:7215-7271 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/ZK_DISCONNECT_HANDLING:7465-7484 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 /branches/BIGDATA_RELEASE_1_3_0:7608-7684 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE:7215-7271 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/ZK_DISCONNECT_HANDLING:7465-7484 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 Property changes on: branches/MGC_1_3_0/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/lib/jetty:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/lib/jetty:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:7608-7684 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/lib/jetty:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/lib/jetty:7465-7484 Property changes on: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/aggregate:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7608-7684 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/aggregate:7465-7484 Property changes on: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/joinGraph:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:7608-7684 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/joinGraph:7465-7484 Property changes on: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/util:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util:7608-7684 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/util:7465-7484 Property changes on: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/htree/raba ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/htree/raba:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/htree/raba:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba:7608-7684 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/htree/raba:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/htree/raba:7465-7484 Property changes on: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo - 
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/jsr166:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/jsr166:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/jsr166:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166:7608-7684 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/jsr166:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/jsr166:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/jsr166:7465-7484 Modified: branches/MGC_1_3_0/bigdata/src/releases/RELEASE_1_3_0.txt =================================================================== --- branches/MGC_1_3_0/bigdata/src/releases/RELEASE_1_3_0.txt 2013-12-19 21:11:18 UTC (rev 7684) +++ branches/MGC_1_3_0/bigdata/src/releases/RELEASE_1_3_0.txt 2013-12-19 21:34:30 UTC (rev 7685) @@ -1,4 +1,4 @@ -This is a minor release of bigdata(R). +This is a major release of bigdata(R). Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF capable of loading 1B triples in under one hour on a 15 node cluster. Bigdata operates in both a single machine mode (Journal), highly available replication cluster mode (HAJournalServer), and a horizontally sharded cluster mode (BigdataFederation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The HAJournalServer adds replication, online backup, horizontal scaling of query, and high availability. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation. @@ -8,8 +8,10 @@ Starting with the 1.0.0 release, we offer a WAR artifact [8] for easy installation of the single machine RDF database. For custom development and cluster installations we recommend checking out the code from SVN using the tag for this release. The code will build automatically under eclipse. You can also build the code using the ant script. The cluster installer requires the use of the ant script. -You can download the WAR from: +Starting with the 1.3.0 release, we offer a tarball artifact [10] for easy installation of the HA replication cluster. 
+You can download the WAR (standalone) or HA artifacts from: + http://sourceforge.net/projects/bigdata/ You can checkout this release from: @@ -18,16 +20,15 @@ New features: -- SPARQL 1.1 Update Extensions (SPARQL UPDATE for named solution sets). See https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=SPARQL_Update for more information. -- SPARQL 1.1 Property Paths. -- Remote Java client for Multi-Tenancy extensions NanoSparqlServer -- Sesame 2.6.10 dependency +- High availability [10]. +- Property Path performance enhancements. - Plus numerous other bug fixes and performance enhancements. Feature summary: +- Highly Available Replication Clusters (HAJournalServer [10]) - Single machine data storage to ~50B triples/quads (RWStore); -- Clustered data storage is essentially unlimited; +- Clustered data storage is essentially unlimited (BigdataFederation); - Simple embedded and/or webapp deployment (NanoSparqlServer); - Triples, quads, or triples with provenance (SIDs); - Fast RDFS+ inference and truth maintenance; @@ -37,14 +38,94 @@ Road map [3]: -- High availability for the journal and the cluster. -- Runtime Query Optimizer for Analytic Query mode; and -- Simplified deployment, configuration, and administration for clusters. +- RDF Graph Mining API [12]; +- Reification Done Right [11]; +- Column-wise indexing; +- Runtime Query Optimizer for Analytic Query mode; +- Performance optimization for scale-out clusters; and +- Simplified deployment, configuration, and administration for scale-out clusters. Change log: Note: Versions with (*) MAY require data migration. For details, see [9]. +1.3.0: + +- http://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA) +- http://sourceforge.net/apps/trac/bigdata/ticket/621 (Coalesce write cache records and install reads in cache) +- http://sourceforge.net/apps/trac/bigdata/ticket/623 (HA TXS) +- http://sourceforge.net/apps/trac/bigdata/ticket/639 (Remove triple-buffering in RWStore) +- http://sourceforge.net/apps/trac/bigdata/ticket/645 (HA backup) +- http://sourceforge.net/apps/trac/bigdata/ticket/646 (River not compatible with newer 1.6.0 and 1.7.0 JVMs) +- http://sourceforge.net/apps/trac/bigdata/ticket/648 (Add a custom function to use full text index for filtering.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/651 (RWS test failure) +- http://sourceforge.net/apps/trac/bigdata/ticket/652 (Compress write cache blocks for replication and in HALogs) +- http://sourceforge.net/apps/trac/bigdata/ticket/662 (Latency on followers during commit on leader) +- http://sourceforge.net/apps/trac/bigdata/ticket/663 (Issue with OPTIONAL blocks) +- http://sourceforge.net/apps/trac/bigdata/ticket/664 (RWStore needs post-commit protocol) +- http://sourceforge.net/apps/trac/bigdata/ticket/665 (HA3 LOAD non-responsive with node failure) +- http://sourceforge.net/apps/trac/bigdata/ticket/666 (Occasional CI deadlock in HALogWriter testConcurrentRWWriterReader) +- http://sourceforge.net/apps/trac/bigdata/ticket/670 (Accumulating HALog files cause latency for HA commit) +- http://sourceforge.net/apps/trac/bigdata/ticket/671 (Query on follower fails during UPDATE on leader) +- http://sourceforge.net/apps/trac/bigdata/ticket/673 (DGC in release time consensus protocol causes native thread leak in HAJournalServer at each commit) +- http://sourceforge.net/apps/trac/bigdata/ticket/674 (WCS write cache compaction causes errors in RWS postHACommit()) +- http://sourceforge.net/apps/trac/bigdata/ticket/676 (Bad patterns for timeout computations) +- http://sourceforge.net/apps/trac/bigdata/ticket/677 (HA deadlock under UPDATE + QUERY) +- http://sourceforge.net/apps/trac/bigdata/ticket/678 (DGC Thread and Open File Leaks: sendHALogForWriteSet()) +- http://sourceforge.net/apps/trac/bigdata/ticket/679 (HAJournalServer can not restart due to logically empty log file) +- http://sourceforge.net/apps/trac/bigdata/ticket/681 (HAJournalServer deadlock: pipelineRemove() and getLeaderId()) +- http://sourceforge.net/apps/trac/bigdata/ticket/684 (Optimization with skos altLabel) +- http://sourceforge.net/apps/trac/bigdata/ticket/686 (Consensus protocol does not detect clock skew correctly) +- http://sourceforge.net/apps/trac/bigdata/ticket/687 (HAJournalServer Cache not populated) +- http://sourceforge.net/apps/trac/bigdata/ticket/689 (Missing URL encoding in RemoteRepositoryManager) +- http://sourceforge.net/apps/trac/bigdata/ticket/690 (Error when using the alias "a" instead of rdf:type for a multipart insert) +- http://sourceforge.net/apps/trac/bigdata/ticket/691 (Failed to re-interrupt thread in HAJournalServer) +- http://sourceforge.net/apps/trac/bigdata/ticket/692 (Failed to re-interrupt thread) +- http://sourceforge.net/apps/trac/bigdata/ticket/693 (OneOrMorePath SPARQL property path expression ignored) +- http://sourceforge.net/apps/trac/bigdata/ticket/694 (Transparently cancel update/query in RemoteRepository) +- http://sourceforge.net/apps/trac/bigdata/ticket/695 (HAJournalServer reports "follower" but is in SeekConsensus and is not participating in commits.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/701 (Problems in BackgroundTupleResult) +- http://sourceforge.net/apps/trac/bigdata/ticket/702 (InvocationTargetException on /namespace call) +- http://sourceforge.net/apps/trac/bigdata/ticket/704 (ask does not return json) +- http://sourceforge.net/apps/trac/bigdata/ticket/705 (Race between QueryEngine.putIfAbsent() and shutdownNow()) +- http://sourceforge.net/apps/trac/bigdata/ticket/706 (MultiSourceSequentialCloseableIterator.nextSource() can throw NPE) +- http://sourceforge.net/apps/trac/bigdata/ticket/707 (BlockingBuffer.close() does not unblock threads) +- http://sourceforge.net/apps/trac/bigdata/ticket/708 (BIND heisenbug - race condition on select query with BIND) +- http://sourceforge.net/apps/trac/bigdata/ticket/711 (sparql protocol: mime type application/sparql-query) +- http://sourceforge.net/apps/trac/bigdata/ticket/712 (SELECT ?x { OPTIONAL { ?x eg:doesNotExist eg:doesNotExist } } incorrect) +- http://sourceforge.net/apps/trac/bigdata/ticket/715 (Interrupt of thread submitting a query for evaluation does not always terminate the AbstractRunningQuery) +- http://sourceforge.net/apps/trac/bigdata/ticket/716 (Verify that IRunningQuery instances (and nested queries) are correctly cancelled when interrupted) +- http://sourceforge.net/apps/trac/bigdata/ticket/718 (HAJournalServer needs to handle ZK client connection loss) +- http://sourceforge.net/apps/trac/bigdata/ticket/720 (HA3 simultaneous service start failure) +- http://sourceforge.net/apps/trac/bigdata/ticket/723 (HA asynchronous tasks must be canceled when invariants are changed) +- http://sourceforge.net/apps/trac/bigdata/ticket/725 (FILTER EXISTS in subselect) +- http://sourceforge.net/apps/trac/bigdata/ticket/726 (Logically empty HALog for committed transaction) +- http://sourceforge.net/apps/trac/bigdata/ticket/727 (DELETE/INSERT fails with OPTIONAL non-matching WHERE) +- http://sourceforge.net/apps/trac/bigdata/ticket/728 (Refactor to create HAClient) +- http://sourceforge.net/apps/trac/bigdata/ticket/729 (ant bundleJar not working) +- http://sourceforge.net/apps/trac/bigdata/ticket/731 (CBD and Update leads to 500 status code) +- http://sourceforge.net/apps/trac/bigdata/ticket/732 (describe statement limit does not work) +- http://sourceforge.net/apps/trac/bigdata/ticket/733 (Range optimizer not optimizing Slice service) +- http://sourceforge.net/apps/trac/bigdata/ticket/734 (two property paths interfere) +- http://sourceforge.net/apps/trac/bigdata/ticket/736 (MIN() malfunction) +- http://sourceforge.net/apps/trac/bigdata/ticket/737 (class cast exception) +- http://sourceforge.net/apps/trac/bigdata/ticket/739 (Inconsistent treatment of bind and optional property path) +- http://sourceforge.net/apps/trac/bigdata/ticket/741 (ctc-striterators should build as independent top-level project (Apache2)) +- http://sourceforge.net/apps/trac/bigdata/ticket/743 (AbstractTripleStore.destroy() does not filter for correct prefix) +- http://sourceforge.net/apps/trac/bigdata/ticket/746 (Assertion error) +- http://sourceforge.net/apps/trac/bigdata/ticket/747 (BOUND bug) +- http://sourceforge.net/apps/trac/bigdata/ticket/748 (incorrect join with subselect renaming vars) +- http://sourceforge.net/apps/trac/bigdata/ticket/754 (Failure to setup SERVICE hook and changeLog for Unisolated and Read/Write connections) +- http://sourceforge.net/apps/trac/bigdata/ticket/755 (Concurrent QuorumActors can interfere leading to failure to progress) +- http://sourceforge.net/apps/trac/bigdata/ticket/756 
(order by and group_concat) +- http://sourceforge.net/apps/trac/bigdata/ticket/760 (Code review on 2-phase commit protocol) +- http://sourceforge.net/apps/trac/bigdata/ticket/764 (RESYNC failure (HA)) +- http://sourceforge.net/apps/trac/bigdata/ticket/770 (alpp ordering) +- http://sourceforge.net/apps/trac/bigdata/ticket/772 (Query timeout only checked at operator start/stop.) +- http://sourceforge.net/apps/trac/bigdata/ticket/776 (Closed as duplicate of #490) +- http://sourceforge.net/apps/trac/bigdata/ticket/778 (HA Leader fail results in transient problem with allocations on other services) +- http://sourceforge.net/apps/trac/bigdata/ticket/783 (Operator Alerts (HA)) + 1.2.4: - http://sourceforge.net/apps/trac/bigdata/ticket/777 (ConcurrentModificationException in ASTComplexOptionalOptimizer) @@ -293,6 +374,9 @@ [7] http://www.systap.com/bigdata.htm [8] http://sourceforge.net/projects/bigdata/files/bigdata/ [9] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration +[10] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=HAJournalServer +[11] http://www.bigdata.com/whitepapers/reifSPARQL.pdf +[12] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=RDF_GAS_API About bigdata: Property changes on: branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/joinGraph:6766-7380 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/test/com/bigdata/bop/joinGraph:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/bop/joinGraph:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/joinGraph:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph:7608-7684 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/test/com/bigdata/bop/joinGraph:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/bop/joinGraph:7465-7484 Property changes on: branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/util:6766-7380 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/util:4486-4522 
/branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/test/com/bigdata/bop/util:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/bop/util:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/bop/util:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/util:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util:7608-7684 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/test/com/bigdata/bop/util:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/bop/util:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/bop/util:7465-7484 Modified: branches/MGC_1_3_0/bigdata/src/test/com/bigdata/io/TestNameAndExtensionFilter.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/test/com/bigdata/io/TestNameAndExtensionFilter.java 2013-12-19 21:11:18 UTC (rev 7684) +++ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/io/TestNameAndExtensionFilter.java 2013-12-19 21:34:30 UTC (rev 7685) @@ -53,7 +53,7 @@ /** * @param name */ - public TestNameAndExtensionFilter(String name) { + public TestNameAndExtensionFilter(final String name) { super(name); } @@ -64,85 +64,93 @@ * @param expected * @param actual */ - public void assertSameFiles( File[] expected, File[] actual ) - { - - if( expected == null ) { - - throw new AssertionError( "expected is null."); - + private void assertSameFiles(final File[] expected, final File[] actual) { + + if (expected == null) { + + throw new AssertionError("expected is null."); + } - if( actual == null ) { - - fail( "actual is null."); - + if (actual == null) { + + fail("actual is null."); + } - assertEquals( "#of files", expected.length, actual.length ); - + assertEquals("#of files", expected.length, actual.length); + // Insert the expected files into a set. - Set expectedSet = new HashSet(); - - for( int i=0; i<expected.length; i++ ) { + final Set<String> expectedSet = new HashSet<String>(); - File expectedFile = expected[ i ]; - - if( expectedFile == null ) { + for (int i = 0; i < expected.length; i++) { - throw new AssertionError( "expected file is null at index="+i ); - + final File expectedFile = expected[i]; + + if (expectedFile == null) { + + throw new AssertionError("expected file is null at index=" + i); + } - if( ! expectedSet.add( expectedFile.toString() ) ) { - - throw new AssertionError( "expected File[] contains duplicate: expected["+i+"]="+expectedFile ); - + if (!expectedSet.add(expectedFile.toString())) { + + throw new AssertionError( + "expected File[] contains duplicate: expected[" + i + + "]=" + expectedFile); + } - + } /* * Verify that each actual file occurs in the expectedSet using a * selection without replacement policy. 
*/ - - for( int i=0; i<actual.length; i++ ) { - - File actualFile = actual[ i ]; - - if( actualFile == null ) { - - fail( "actual file is null at index="+i ); - + + for (int i = 0; i < actual.length; i++) { + + final File actualFile = actual[i]; + + if (actualFile == null) { + + fail("actual file is null at index=" + i); + } - - if( ! expectedSet.remove( actual[ i ].toString() ) ) { - - fail( "actual file="+actualFile+" at index="+i+" was not found in expected files." ); - + + if (!expectedSet.remove(actual[i].toString())) { + + fail("actual file=" + actualFile + " at index=" + i + + " was not found in expected files."); + } - + } - + } - + /** * Test verifies that no files are found using a guarenteed unique basename. */ - public void test_filter_001() throws IOException - { - - final File basefile = File.createTempFile(getName(),"-test"); - basefile.deleteOnExit(); - - final String basename = basefile.toString(); - System.err.println( "basename="+basename ); - - NameAndExtensionFilter logFilter = new NameAndExtensionFilter( basename, ".log" ); - - assertSameFiles( new File[]{}, logFilter.getFiles() ); - + public void test_filter_001() throws IOException { + + final File basefile = File.createTempFile(getName(), "-test"); + + try { + + final String basename = basefile.toString(); + + final NameAndExtensionFilter logFilter = new NameAndExtensionFilter( + basename, ".log"); + + assertSameFiles(new File[] {}, logFilter.getFiles()); + + } finally { + + basefile.delete(); + + } + } /** @@ -150,33 +158,48 @@ */ public void test_filter_002() throws IOException { - int N = 100; - - final File logBaseFile = File.createTempFile(getName(),"-test"); - logBaseFile.deleteOnExit(); - - final String basename = logBaseFile.toString(); - System.err.println( "basename="+basename ); - - NameAndExtensionFilter logFilter = new NameAndExtensionFilter( basename, ".log" ); + final int N = 100; - Vector v = new Vector( N ); - - for( int i=0; i<N; i++ ) { + final Vector<File> v = new Vector<File>(N); - File logFile = new File( basename+"."+i+".log" ); - logFile.deleteOnExit(); - logFile.createNewFile(); -// System.err.println( "logFile="+logFile ); - - v.add( logFile ); - + final File logBaseFile = File.createTempFile(getName(), "-test"); + // logBaseFile.deleteOnExit(); + + try { + + final String basename = logBaseFile.toString(); + // System.err.println( "basename="+basename ); + + final NameAndExtensionFilter logFilter = new NameAndExtensionFilter( + basename, ".log"); + + for (int i = 0; i < N; i++) { + + final File logFile = new File(basename + "." 
+ i + ".log"); + // logFile.deleteOnExit(); + logFile.createNewFile(); + // System.err.println( "logFile="+logFile ); + + v.add(logFile); + + } + + final File[] expectedFiles = (File[]) v.toArray(new File[] {}); + + assertSameFiles(expectedFiles, logFilter.getFiles()); + + } finally { + + logBaseFile.delete(); + + for (File f : v) { + + f.delete(); + + } + } - File[] expectedFiles = (File[]) v.toArray(new File[]{}); - - assertSameFiles( expectedFiles, logFilter.getFiles() ); - } } Property changes on: branches/MGC_1_3_0/bigdata/src/test/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/jsr166:6766-7380 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/test/com/bigdata/jsr166:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/jsr166:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/jsr166:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/jsr166:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166:7608-7684 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/test/com/bigdata/jsr166:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/jsr166:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/jsr166:7465-7484 Property changes on: branches/MGC_1_3_0/bigdata/src/test/com/bigdata/util/httpd ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/util/httpd:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/util/httpd:6766-7380 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/test/com/bigdata/util/httpd:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/util/httpd:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/util/httpd:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/util/httpd:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/util/httpd:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd:7608-7684 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4486-4522 
/branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/test/com/bigdata/util/httpd:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/util/httpd:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/util/httpd:7465-7484 Property changes on: branches/MGC_1_3_0/bigdata-compatibility ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-compatibility:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-compatibility:6766-7380 /branches/INT64_BRANCH/bigdata-compatibility:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-compatibility:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-compatibility:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata-compatibility:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata-compatibility:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-compatibility:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata-compatibility:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-compatibility:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-compatibility:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility:7608-7684 /branches/INT64_BRANCH/bigdata-compatibility:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-compatibility:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-compatibility:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata-compatibility:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata-compatibility:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-compatibility:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata-compatibility:7465-7484 Property changes on: branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/attr ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/attr:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/attr:6766-7380 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/attr:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-jini/src/java/com/bigdata/attr:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/attr:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata-jini/src/java/com/bigdata/attr:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/attr:7465-7484 /branches/bugfix-btm/bigdata-jini/src/java/com/bigdata/attr:2594-3237 
/branches/dev-btm/bigdata-jini/src/java/com/bigdata/attr:2574-2730 /branches/fko/bigdata-jini/src/java/com/bigdata/attr:3150-3194 /trunk/bigdata-jini/src/java/com/bigdata/attr:2981-3282,3368-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/attr:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/attr:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr:7608-7684 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/attr:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-jini/src/java/com/bigdata/attr:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/attr:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata-jini/src/java/com/bigdata/attr:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/attr:7465-7484 /branches/bugfix-btm/bigdata-jini/src/java/com/bigdata/attr:2594-3237 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/attr:2574-2730 /branches/fko/bigdata-jini/src/java/com/bigdata/attr:3150-3194 /trunk/bigdata-jini/src/java/com/bigdata/attr:2981-3282,3368-3437,3656-4061 Property changes on: branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/disco ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/disco:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/disco:6766-7380 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/disco:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-jini/src/java/com/bigdata/disco:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/disco:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata-jini/src/java/com/bigdata/disco:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/disco:7465-7484 /branches/bugfix-btm/bigdata-jini/src/java/com/bigdata/disco:2594-3237 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/disco:2574-2730 /branches/fko/bigdata-jini/src/java/com/bigdata/disco:3150-3194 /trunk/bigdata-jini/src/java/com/bigdata/disco:2981-3282,3368-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/disco:6769-6785 
/branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/disco:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco:7608-7684 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/disco:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-jini/src/java/com/bigdata/disco:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/disco:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata-jini/src/java/com/bigdata/disco:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/disco:7465-7484 /branches/bugfix-btm/bigdata-jini/src/java/com/bigdata/disco:2594-3237 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/disco:2574-2730 /branches/fko/bigdata-jini/src/java/com/bigdata/disco:3150-3194 /trunk/bigdata-jini/src/java/com/bigdata/disco:2981-3282,3368-3437,3656-4061 Modified: branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DefaultSnapshotPolicy.java =================================================================== --- branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DefaultSnapshotPolicy.java 2013-12-19 21:11:18 UTC (rev 7684) +++ branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DefaultSnapshotPolicy.java 2013-12-19 21:34:30 UTC (rev 7685) @@ -136,6 +136,7 @@ } + @Override public void init(final HAJournal jnl) { // delay until next run (ms). Modified: branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-12-19 21:11:18 UTC (rev 7684) +++ branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-12-19 21:34:30 UTC (rev 7685) @@ -54,6 +54,8 @@ import org.apache.log4j.Logger; import com.bigdata.concurrent.FutureTaskInvariantMon; +import com.bigdata.counters.CounterSet; +import com.bigdata.counters.Instrument; import com.bigdata.ha.HAGlue; import com.bigdata.ha.QuorumService; import com.bigdata.ha.RunState; @@ -94,6 +96,7 @@ import com.bigdata.journal.jini.ha.HAJournalServer.NSSConfigurationOptions; import com.bigdata.journal.jini.ha.HAJournalServer.RunStateEnum; import com.bigdata.quorum.Quorum; +import com.bigdata.resources.StoreManager.IStoreManagerCounters; import com.bigdata.service.AbstractTransactionService; import com.bigdata.service.jini.JiniClient; import com.bigdata.service.jini.RemoteAdministrable; @@ -729,6 +732,208 @@ // } /** + * Interface for additional performance counters exposed by the + * {@link HAJournal}. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + public interface IHAJournalCounters { + + /** + * The namespace for counters pertaining to free space on the various + * volumes. + */ + String Volumes = "Volumes"; + +// /** +// * The configured service directory. 
+// */ +// String ServiceDir = "ServiceDir"; +// +// /** +// * The configured data directory (the directory in which the Journal +// * file is stored). +// */ +// String DataDir = "DataDir"; +// +// /** +// * The configured HALog directory. +// */ +// String HALogDir = "HALogDir"; +// +// /** +// * The configured Snapshot directory. +// */ +// String SnapshotDir = "ShapshotDir"; +// +// /** +// * The configured tmp directory. +// */ +// String TmpDir = "TmpDir"; + + /** + * The #of bytes available on the disk volume on which the service + * directory is located. + * + * @see HAJournalServer#getServiceDir() + */ + String ServiceDirBytesAvailable = "Service Volume Bytes Available"; + + /** + * The #of bytes available on the disk volume on which the data + * directory is located (the directory in which the Journal file + * is stored). + * + * @see Journal#getDataDir() + */ + String DataDirBytesAvailable = "Data Volume Bytes Available"; + + /** + * The #of bytes available on the disk volume on which the HALog + * directory is located. + * + * @see HALogNexus#getHALogDir() + */ + String HALogDirBytesAvailable = "HALog Volume Bytes Available"; + + /** + * The #of bytes available on the disk volume on which the snapshot + * directory is located. + * + ... [truncated message content] |
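The counters above report free space at the level of the disk volume hosting each configured directory, not per file. A rough sketch of how such values can be obtained in plain Java — the directory paths are hypothetical stand-ins, and the real implementation hangs these figures off the journal's CounterSet rather than printing them:

import java.io.File;

public class VolumeBytesAvailableSketch {

    /** Usable bytes on the volume hosting dir, or -1L if dir is missing. */
    static long bytesAvailable(final File dir) {
        return dir.exists() ? dir.getUsableSpace() : -1L;
    }

    public static void main(final String[] args) {
        // Hypothetical stand-ins for the service/data/HALog/snapshot dirs.
        final File serviceDir = new File("/var/bigdata/service");
        final File dataDir = new File("/var/bigdata/service/data");
        System.out.println("Service Volume Bytes Available: " + bytesAvailable(serviceDir));
        System.out.println("Data Volume Bytes Available: " + bytesAvailable(dataDir));
    }
}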
From: <tho...@us...> - 2013-12-19 21:11:26
Revision: 7684 http://bigdata.svn.sourceforge.net/bigdata/?rev=7684&view=rev Author: thompsonbry Date: 2013-12-19 21:11:18 +0000 (Thu, 19 Dec 2013) Log Message: ----------- Adding my version of TestSocketsDirect prior to merging in changes from the main development branch. See #779 Added Paths: ----------- branches/MGC_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java Added: branches/MGC_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java (rev 0) +++ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java 2013-12-19 21:11:18 UTC (rev 7684) @@ -0,0 +1,818 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +package com.bigdata.ha.pipeline; + +import java.io.IOException; +import java.io.InputStream; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.SocketOption; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +import junit.framework.AssertionFailedError; + +import com.bigdata.btree.BytesUtil; +import com.bigdata.io.TestCase3; + +/** + * Test suite for basic socket behaviors. + * <p> + * Note: Tests in this suite should use direct byte buffers (non-heap NIO) + * buffers in order accurately model the conditions that bigdata uses for write + * replication. If you use heap byte[]s, then they are copied into an NIO direct + * buffer before they are transmitted over a socket. By using NIO direct + * buffers, we stay within the zero-copy pattern for sockets. + * <p> + * Note: Tests in this suite need to use {@link ServerSocketChannel#open()} to + * get access to the stream oriented listening interface for the server side of + * the socket. This is what is used by the {@link HAReceiveService}. It also + * sets up the {@link ServerSocketChannel} in a non-blocking mode and then uses + * the selectors to listen for available data. See {@link HAReceiveService}. 
+ * + * @author <a href="mailto:mar...@us...">Martyn + * Cutcher</a> + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class TestSocketsDirect extends TestCase3 { + + public TestSocketsDirect() { + } + + public TestSocketsDirect(String name) { + super(name); + } + + /** + * Writes out the available options for the client and server socket. + * + * @throws IOException + */ + public void testDirectSockets_options() throws IOException { + + // Get a socket addresss for an unused port. + final InetSocketAddress serverAddr = new InetSocketAddress(getPort(0)); + + // First our ServerSocketChannel + final ServerSocketChannel ssc = ServerSocketChannel.open(); + try { + + // bind the ServerSocket to the specified port. + ssc.bind(serverAddr); + + // Now the first Client SocketChannel + final SocketChannel cs = SocketChannel.open(); + try { + /* + * Note: true if connection made. false if connection in + * progress. + */ + final boolean immediate = cs.connect(serverAddr); + if (!immediate) { + // Did not connect immediately, so finish connect now. + if (!cs.finishConnect()) { + fail("Did not connect."); + } + } + + /* + * Write out the client socket options. + */ + log.info("Client:: isOpen=" + cs.isOpen()); + log.info("Client:: isBlocking=" + cs.isBlocking()); + log.info("Client:: isRegistered=" + cs.isRegistered()); + log.info("Client:: isConnected=" + cs.isConnected()); + log.info("Client:: isConnectionPending=" + + cs.isConnectionPending()); + for (SocketOption<?> opt : cs.supportedOptions()) { + log.info("Client:: " + opt + " := " + cs.getOption(opt)); + } + + /* + * Note: We need to use ServerSocketChannel.open() to get access + * to the stream oriented listening interface for the server + * side of the socket. + */ + log.info("Server:: isOpen=" + ssc.isOpen()); + log.info("Server:: isBlocking=" + ssc.isBlocking()); + log.info("Server:: isRegistered=" + ssc.isRegistered()); + for (SocketOption<?> opt : ssc.supportedOptions()) { + log.info("Server:: " + opt + " := " + cs.getOption(opt)); + } + + } finally { + cs.close(); + } + + } finally { + + ssc.close(); + + } + + } + + /** + * Simple test of connecting to a server socket and the failure to connect + * to a port not associated with a server socket. + * + * @throws IOException + */ + public void testDirectSockets_exceptionIfPortNotOpen() throws IOException { + + // Get two socket addressses. We will open a service on one and try to + // connect to the unused one on the other port. + final InetSocketAddress serverAddr1 = new InetSocketAddress(getPort(0)); + final InetSocketAddress serverAddr2 = new InetSocketAddress(getPort(0)); + + // First our ServerSocket + final ServerSocket ss1 = new ServerSocket(); + try { + + // bind the ServerSocket to the specified port. + ss1.bind(serverAddr1); + + assertTrue(ss1.getChannel() == null); + + /* + * Without a new connect request we should not be able to accept() a + * new connection. + */ + try { + accept(ss1); + fail("Expected timeout failure"); + } catch (AssertionFailedError afe) { + // expected + } + + // Now the first Client SocketChannel + final SocketChannel cs1 = SocketChannel.open(); + try { + /* + * Note: true if connection made. false if connection in + * progress. + */ + final boolean immediate1 = cs1.connect(serverAddr1); + if (!immediate1) { + // Did not connect immediately, so finish connect now. 
+ if (!cs1.finishConnect()) { + fail("Did not connect."); + } + } + } finally { + cs1.close(); + } + + // Now the first Client SocketChannel + final SocketChannel cs2 = SocketChannel.open(); + try { + cs1.connect(serverAddr2); + fail("Expecting " + IOException.class); + } catch (IOException ex) { + if(log.isInfoEnabled()) + log.info("Ignoring expected exception: "+ex); + } finally { + cs2.close(); + } + + /* + * Without a new connect request we should not be able to accept() a + * new connection. + */ + try { + accept(ss1); + fail("Expected timeout failure"); + } catch (AssertionFailedError afe) { + // expected + } + + } finally { + + ss1.close(); + + } + + } + + /** + * Test of a large write on a socket to understand what happens when the + * write is greater than the combined size of the client send buffer and the + * server receive buffer and the server side of the socket is either not + * accepted or already shutdown. + * + * @throws IOException + * @throws InterruptedException + */ + public void testDirectSockets_largeWrite_NotAccepted() throws IOException, + InterruptedException { + + final Random r = new Random(); + + // Get a socket addresss for an unused port. + final InetSocketAddress serverAddr = new InetSocketAddress(getPort(0)); + + // First our ServerSocket + final ServerSocket ss = new ServerSocket(); + try { + + // Size of the server socket receive buffer. + final int receiveBufferSize = ss.getReceiveBufferSize(); + + // Allocate buffer twice as large as the receive buffer. + final byte[] largeBuffer = new byte[receiveBufferSize * 10]; + + if (log.isInfoEnabled()) { + log.info("receiveBufferSize=" + receiveBufferSize + + ", largeBufferSize=" + largeBuffer.length); + } + + // fill buffer with random data. + r.nextBytes(largeBuffer); + + // bind the ServerSocket to the specified port. + ss.bind(serverAddr); + + // Now the first Client SocketChannel + final SocketChannel cs = SocketChannel.open(); + try { + /* + * Note: true if connection made. false if connection in + * progress. + */ + final boolean immediate = cs.connect(serverAddr); + if (!immediate) { + // Did not connect immediately, so finish connect now. + if (!cs.finishConnect()) { + fail("Did not connect."); + } + } + + /* + * Attempt to write data. The server socket is not yet accepted. + * This should hit a timeout. + */ + assertTimeout(10L, TimeUnit.SECONDS, new WriteBufferTask(cs, + ByteBuffer.wrap(largeBuffer))); + + accept(ss); + + } finally { + cs.close(); + } + + } finally { + + ss.close(); + + } + + } + + /** + * The use of threaded tasks in the send/receive service makes it difficult to + * observer the socket state changes. + * + * So let's begin by writing some tests over the raw sockets. + * + * Note that connecting and then immediately closing the socket is perfectly okay. + * ...with an accept followed by a read() of -1 on the returned Socket stream. + * + * @throws IOException + * @throws InterruptedException + */ + public void testDirectSockets() throws IOException, InterruptedException { + + // The payload size that we will use. + final int DATA_LEN = 200; + + final Random r = new Random(); + final byte[] data = new byte[DATA_LEN]; + r.nextBytes(data); + final byte[] dst = new byte[DATA_LEN]; + + // The server side receive buffer size (once we open the server socket). 
+ int receiveBufferSize = -1; + + final InetSocketAddress serverAddr = new InetSocketAddress(getPort(0)); + + // First our ServerSocket + final ServerSocket ss = new ServerSocket(); + try { + + assertTrue(ss.getChannel() == null); + + // bind the server socket to the port. + ss.bind(serverAddr); + + assertTrue(ss.getChannel() == null); + + // figure out the receive buffer size on the server socket. + receiveBufferSize = ss.getReceiveBufferSize(); + + if (log.isInfoEnabled()) + log.info("receiveBufferSize=" + receiveBufferSize + + ", payloadSize=" + DATA_LEN); + + if (receiveBufferSize < DATA_LEN) { + + fail("Service socket receive buffer is smaller than test payload size: receiveBufferSize=" + + receiveBufferSize + ", payloadSize=" + DATA_LEN); + + } + + { + /* + * InputStream for server side of socket connection - set below and + * then reused outside of the try/finally block. + */ + InputStream instr = null; + + // Now the first Client SocketChannel + final SocketChannel cs1 = SocketChannel.open(); + try { + + /* + * Note: true if connection made. false if connection in + * progress. + */ + final boolean immediate1 = cs1.connect(serverAddr); + if (!immediate1) { + if (!cs1.finishConnect()) { + fail("Did not connect?"); + } + } + + assertTrue(ss.getChannel() == null); + + /* + * We are connected. + */ + + final ByteBuffer src = ByteBuffer.wrap(data); + + // Write some data on the client socket. + cs1.write(src); + + /* + * Accept client's connection on server (after connect and + * write). + */ + final Socket readSckt1 = accept(ss); + + // Stream to read the data from the socket on the server + // side. + instr = readSckt1.getInputStream(); + + // and read the data + instr.read(dst); + + // confirming the read is correct + assertTrue(BytesUtil.bytesEqual(data, dst)); + + assertTrue(ss.getChannel() == null); + + /* + * Attempting to read more returns ZERO because there is + * nothing in the buffer and the connection is still open on + * the client side. + * + * Note: instr.read(buf) will BLOCK until the data is + * available, the EOF is detected, or an exception is + * thrown. + */ + assertEquals(0, instr.available()); + // assertEquals(0, instr.read(dst)); + + /* + * Now write some more data into the channel and *then* + * close it. + */ + cs1.write(ByteBuffer.wrap(data)); + + // close the client side of the socket + cs1.close(); + + // The server side of client connection is still open. + assertTrue(readSckt1.isConnected()); + assertFalse(readSckt1.isClosed()); + + /* + * Now try writing some more data. This should be disallowed + * since we closed the client side of the socket. + */ + try { + cs1.write(ByteBuffer.wrap(data)); + fail("Expected closed channel exception"); + } catch (ClosedChannelException e) { + // expected + } + + /* + * Since we closed the client side of the socket, when we + * try to read more data on the server side of the + * connection. The data that we already buffered is still + * available on the server side of the socket. + */ + { + // the already buffered data should be available. + final int rdlen = instr.read(dst); + assertEquals(DATA_LEN, rdlen); + assertTrue(BytesUtil.bytesEqual(data, dst)); + } + + /* + * We have drained the buffered data. There is no more + * buffered data and client side is closed, so an attempt to + * read more data on the server side of the socket will + * return EOF (-1). + */ + assertEquals(-1, instr.read(dst)); // read EOF + + // if so then should we explicitly close its socket? 
+ readSckt1.close(); + assertTrue(readSckt1.isClosed()); + + /* + * Still reports EOF after the accepted server socket is + * closed. + */ + assertEquals(-1, instr.read(dst)); + + assertFalse(ss.isClosed()); + assertTrue(ss.getChannel() == null); + + } finally { + cs1.close(); + } + + // failing to read from original stream + final int nrlen = instr.read(dst); + assertEquals(-1, nrlen); + } + + /* + * Now open a new client Socket and connect to the server. + */ + final SocketChannel cs2 = SocketChannel.open(); + try { + + // connect to the server socket again. + final boolean immediate2 = cs2.connect(serverAddr); + if (!immediate2) { + if (!cs2.finishConnect()) { + fail("Did not connect?"); + } + } + + // Now server should accept the new client connection + final Socket s2 = accept(ss); + + // Client writes to the SocketChannel + final int wlen = cs2.write(ByteBuffer.wrap(data)); + assertEquals(DATA_LEN, wlen); // verify data written. + + // but succeeding to read from the new Socket + final InputStream instr2 = s2.getInputStream(); + instr2.read(dst); + assertTrue(BytesUtil.bytesEqual(data, dst)); + + /* + * Question: Can a downstream close be detected upstream? + * + * Answer: No. Closing the server socket does not tell the + * client that the socket was closed. + */ + { + // close server side input stream. + instr2.close(); + + // but the client still thinks its connected. + assertTrue(cs2.isOpen()); + + // Does the client believe it is still open after a brief + // sleep? + Thread.sleep(1000); + assertTrue(cs2.isOpen()); // yes. + + // close server stocket. + s2.close(); + + // client still thinks it is connected after closing server + // socket. + assertTrue(cs2.isOpen()); + + // Does the client believe it is still open after a brief + // sleep? + Thread.sleep(1000); + assertTrue(cs2.isOpen()); // yes. + + } + + /* + * Now write some more to the socket. We have closed the + * accepted connection on the server socket. Our observations + * show that the 1st write succeeds. The second write then fails + * with 'IOException: "Broken pipe"' + * + * The server socket is large (256k). We are not filling it up, + * but the 2nd write always fails. Further, the client never + * believes that the connection is closed until the 2nd write, + */ + { + final int writeSize = 1; + int nwritesOk = 0; + long nbytesReceived = 0L; + while (true) { + try { + // write a payload. + final int wlen2 = cs2.write(ByteBuffer.wrap(data, + 0, writeSize)); + // if write succeeds, should have written all bytes. + assertEquals(writeSize, wlen2); + nwritesOk++; + nbytesReceived += wlen2; + // does the client think the connection is still open? + assertTrue(cs2.isOpen()); // yes. + Thread.sleep(1000); + assertTrue(cs2.isOpen()); // yes. + } catch (IOException ex) { + if (log.isInfoEnabled()) + log.info("Expected exception: nwritesOk=" + + nwritesOk + ", nbytesReceived=" + + nbytesReceived + ", ex=" + ex); + break; + } + } + } + + /* + * Having closed the input, without a new connect request we + * should not be able to accept the new write since the data + * were written on a different client connection. + */ + try { + final Socket s3 = accept(ss); + fail("Expected timeout failure"); + } catch (AssertionFailedError afe) { + // expected + } + + } finally { + cs2.close(); + } + + } finally { + ss.close(); + } + + } + + /** + * Confirms that multiple clients can communicate with same Server + * + * @throws IOException + */ + public void testMultipleClients() throws IOException { + + // The payload size that we will use. 
+ final int DATA_LEN = 200; + final Random r = new Random(); + final byte[] data = new byte[DATA_LEN]; + r.nextBytes(data); + + final int nclients = 10; + + final ArrayList<SocketChannel> clients = new ArrayList<SocketChannel>(); + final ArrayList<Socket> sockets = new ArrayList<Socket>(); + + final InetSocketAddress serverAddr = new InetSocketAddress(getPort(0)); + + final ServerSocket ss = new ServerSocket(); + try { + + // bind the ServerSocket to the specified port. + ss.bind(serverAddr); + + assertTrue(ss.getChannel() == null); + + final int receiveBufferSize = ss.getReceiveBufferSize(); + + // Make sure that we have enough room to receive all client writes + // before draining any of them. + assertTrue(DATA_LEN * nclients <= receiveBufferSize); + + assertNoTimeout(10, TimeUnit.SECONDS, new Callable<Void>() { + + @Override + public Void call() throws Exception { + + for (int c = 0; c < nclients; c++) { + + // client connects to server. + final SocketChannel cs = SocketChannel.open(); + cs.connect(serverAddr); + clients.add(cs); + + // accept connection on server. + sockets.add(ss.accept()); + + // write to each SocketChannel (after connect/accept) + cs.write(ByteBuffer.wrap(data)); + } + + return null; + + } + + }); + + /* + * Now read from all Sockets accepted on the server. + * + * Note: This is a simple loop, not a parallel read. The same buffer + * is reused on each iteration. + */ + { + + final byte[] dst = new byte[DATA_LEN]; + + for (Socket s : sockets) { + + assertFalse(s.isClosed()); + + final InputStream instr = s.getInputStream(); + + assertFalse(-1 == instr.read(dst)); // doesn't return -1 + + assertTrue(BytesUtil.bytesEqual(data, dst)); + + // Close each Socket to ensure it is different + s.close(); + + assertTrue(s.isClosed()); + + } + + } + + } finally { + + // ensure client side connections are closed. + for (SocketChannel ch : clients) { + if (ch != null) + ch.close(); + } + + // ensure server side connections are closed. + for (Socket s : sockets) { + if (s != null) + s.close(); + } + + // close the server socket. + ss.close(); + + } + + } + + /** wrap the ServerSocket accept with a timeout check. */ + private Socket accept(final ServerSocket ss) { + + final AtomicReference<Socket> av = new AtomicReference<Socket>(); + + assertNoTimeout(1, TimeUnit.SECONDS, new Callable<Void>() { + + @Override + public Void call() throws Exception { + + av.set(ss.accept()); + + return null; + } + }); + + return av.get(); + } + + /** + * Fail the test if the {@link Callable} completes before the specified + * timeout. + * + * @param timeout + * @param unit + * @param callable + */ + private void assertTimeout(final long timeout, final TimeUnit unit, + final Callable<Void> callable) { + final ExecutorService es = Executors.newSingleThreadExecutor(); + final Future<Void> ret = es.submit(callable); + final long begin = System.currentTimeMillis(); + try { + // await Future with timeout. 
+            ret.get(timeout, unit);
+            final long elapsed = System.currentTimeMillis() - begin;
+            fail("Expected timeout: elapsed=" + elapsed + "ms, timeout="
+                    + timeout + " " + unit);
+        } catch (TimeoutException e) {
+            // that is expected
+            final long elapsed = System.currentTimeMillis() - begin;
+            if (log.isInfoEnabled())
+                log.info("timeout after " + elapsed + "ms");
+            return;
+        } catch (Exception e) {
+            final long elapsed = System.currentTimeMillis() - begin;
+            fail("Expected timeout: elapsed=" + elapsed + ", timeout="
+                    + timeout + " " + unit, e);
+        } finally {
+            log.warn("Cancelling task - should interrupt accept()");
+            ret.cancel(true/* mayInterruptIfRunning */);
+            es.shutdown();
+        }
+    }
+
+    /**
+     * Throws {@link AssertionFailedError} if the {@link Callable} does not
+     * succeed within the timeout.
+     * 
+     * @param timeout
+     * @param unit
+     * @param callable
+     * 
+     * @throws AssertionFailedError
+     *             if the {@link Callable} does not succeed within the timeout.
+     * @throws AssertionFailedError
+     *             if the {@link Callable} fails.
+     */
+    private void assertNoTimeout(final long timeout, final TimeUnit unit,
+            final Callable<Void> callable) {
+        final ExecutorService es = Executors.newSingleThreadExecutor();
+        try {
+            final Future<Void> ret = es.submit(callable);
+            ret.get(timeout, unit);
+        } catch (TimeoutException e) {
+            fail("Unexpected timeout");
+        } catch (Exception e) {
+            fail("Unexpected Exception", e);
+        } finally {
+            es.shutdown();
+        }
+    }
+
+    /**
+     * Task writes the data on the client {@link SocketChannel}.
+     * 
+     * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+     */
+    private static class WriteBufferTask implements Callable<Void> {
+
+        final private ByteBuffer buf;
+        final private SocketChannel cs;
+
+        public WriteBufferTask(final SocketChannel cs, final ByteBuffer buf) {
+            this.cs = cs;
+            this.buf = buf;
+        }
+
+        @Override
+        public Void call() throws Exception {
+            cs.write(buf);
+            return null;
+        }
+
+    }
+
+}
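The zero-copy point made in the class javadoc above reduces to a few lines. A minimal sketch, assuming a placeholder host and port that are not part of the test suite: writing from a direct buffer lets the JDK hand the bytes to the socket without first copying them into a temporary direct buffer, which is what happens whenever a heap byte[] is written.

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;

public class DirectWriteSketch {
    public static void main(final String[] args) throws Exception {
        // Direct (non-heap) buffer: stays within the zero-copy pattern.
        final ByteBuffer buf = ByteBuffer.allocateDirect(8192);
        buf.put("payload".getBytes("UTF-8"));
        buf.flip(); // switch from filling the buffer to draining it
        try (SocketChannel ch = SocketChannel.open(
                new InetSocketAddress("localhost", 9090))) {
            while (buf.hasRemaining()) {
                ch.write(buf); // write() may be partial; loop until drained
            }
        }
    }
}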
From: <mar...@us...> - 2013-12-19 17:15:27
Revision: 7683 http://bigdata.svn.sourceforge.net/bigdata/?rev=7683&view=rev
Author: martyncutcher
Date: 2013-12-19 17:15:20 +0000 (Thu, 19 Dec 2013)
Log Message:
-----------
Wraps the ClosedByInterruptException and re-throws it rather than propagating an interrupt. TestHA3Kills is still green.
Modified Paths:
--------------
branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
Modified: branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-12-19 15:26:32 UTC (rev 7682)
+++ branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-12-19 17:15:20 UTC (rev 7683)
@@ -3634,8 +3634,11 @@
                     if (InnerCause.isInnerCause(t,
                             ClosedByInterruptException.class)) {
                         // propagate interrupt
-                        Thread.currentThread().interrupt();
-                        return;
+                        // Thread.currentThread().interrupt();
+
+                        // wrap and re-throw
+                        throw new RuntimeException(t);
+                        // return;
                     }
                     /*
                      * Error handler.
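The pattern adopted here is easy to state in isolation. A minimal sketch with illustrative method names, using a simple cause-chain walk in place of bigdata's InnerCause helper:

import java.nio.channels.ClosedByInterruptException;

public class RethrowSketch {

    // Stand-in for InnerCause.isInnerCause(): scan the cause chain.
    static boolean isInnerCause(final Throwable t,
            final Class<? extends Throwable> cls) {
        for (Throwable c = t; c != null; c = c.getCause()) {
            if (cls.isInstance(c))
                return true;
        }
        return false;
    }

    static void handleError(final Throwable t) {
        if (isInnerCause(t, ClosedByInterruptException.class)) {
            // Wrap and re-throw so the caller's error handler observes the
            // failure, rather than silently re-asserting the interrupt.
            throw new RuntimeException(t);
        }
        // ... fall through to the normal error handler ...
    }
}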
From: <mar...@us...> - 2013-12-19 15:26:41
Revision: 7682 http://bigdata.svn.sourceforge.net/bigdata/?rev=7682&view=rev
Author: martyncutcher
Date: 2013-12-19 15:26:32 +0000 (Thu, 19 Dec 2013)
Log Message:
-----------
Add a check for ClosedByInterruptException to handleReplicatedWrite; this exception was occasionally forcing a service to enterErrorState and break the quorum.
Modified Paths:
--------------
branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
Modified: branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-12-19 14:58:34 UTC (rev 7681)
+++ branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-12-19 15:26:32 UTC (rev 7682)
@@ -31,6 +31,7 @@
 import java.net.InetSocketAddress;
 import java.net.URL;
 import java.nio.ByteBuffer;
+import java.nio.channels.ClosedByInterruptException;
 import java.rmi.Remote;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -3629,6 +3630,13 @@
                         Thread.currentThread().interrupt();
                         return;
                     }
+                    // Add check for ClosedByInterruptException - but is this sufficient if the channel is now closed?
+                    if (InnerCause.isInnerCause(t,
+                            ClosedByInterruptException.class)) {
+                        // propagate interrupt
+                        Thread.currentThread().interrupt();
+                        return;
+                    }
                     /*
                      * Error handler.
                      *
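For contrast with the follow-up in r7683 above, the idiom this commit uses is the standard interrupt-restoration pattern. A minimal sketch with illustrative names; the instanceof test stands in for the cause-chain walk the real code performs with InnerCause:

import java.nio.channels.ClosedByInterruptException;

public class InterruptRestoreSketch {

    static void onReplicatedWriteError(final Throwable t) {
        if (t instanceof ClosedByInterruptException) {
            // Restore the interrupt status so callers further up the stack
            // can observe the cancellation. Note: the channel is already
            // closed by the time this exception is raised, which is the
            // open question the in-line comment in the diff above flags.
            Thread.currentThread().interrupt();
            return;
        }
        // ... enter the error handler for other causes ...
    }
}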
From: <tho...@us...> - 2013-12-19 14:58:42
Revision: 7681 http://bigdata.svn.sourceforge.net/bigdata/?rev=7681&view=rev
Author: thompsonbry
Date: 2013-12-19 14:58:34 +0000 (Thu, 19 Dec 2013)
Log Message:
-----------
Added a check for no Content-Type header to avoid an NPE.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2013-12-18 21:47:44 UTC (rev 7680)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2013-12-19 14:58:34 UTC (rev 7681)
@@ -140,7 +140,7 @@
         final String namespace = getNamespace(req);
         
         final String contentType = req.getContentType();
-
+        if(contentType==null) buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, "Content-Type not specified.");
         if (log.isInfoEnabled())
             log.info("Request body: " + contentType);
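The shape of such a guard is worth spelling out. A generic servlet sketch, not the committed code: buildResponse and the servlet's own constants are replaced here by the standard sendError, and the boolean result makes explicit that the caller should stop once the 400 has been sent.

import java.io.IOException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class ContentTypeGuardSketch {

    /** Returns false (after sending a 400) if no Content-Type was supplied. */
    static boolean requireContentType(final HttpServletRequest req,
            final HttpServletResponse resp) throws IOException {
        final String contentType = req.getContentType(); // may be null
        if (contentType == null) {
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST,
                    "Content-Type not specified.");
            return false; // caller must stop processing the request
        }
        return true;
    }
}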
From: <tho...@us...> - 2013-12-18 21:47:51
Revision: 7680 http://bigdata.svn.sourceforge.net/bigdata/?rev=7680&view=rev
Author: thompsonbry
Date: 2013-12-18 21:47:44 +0000 (Wed, 18 Dec 2013)
Log Message:
-----------
turning back on snapshot builds.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/build.properties
Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.properties 2013-12-18 21:04:13 UTC (rev 7679)
+++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2013-12-18 21:47:44 UTC (rev 7680)
@@ -89,7 +89,7 @@
 
 # Set true to do a snapshot build. This changes the value of ${version} to 
 # include the date.
-snapshot=false
+snapshot=true
 
 # Javadoc build may be disabled using this property. The javadoc target will
 # not be executed unless this property is defined (its value does not matter).
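The comment in build.properties describes how ${version} is derived. A guess at the general shape in plain Java — the exact date format used by the ant build is not shown in this diff:

import java.text.SimpleDateFormat;
import java.util.Date;

public class VersionStringSketch {

    static String version(final String buildVer, final boolean snapshot) {
        if (!snapshot)
            return buildVer; // e.g. "1.3.0" for a release build
        // Snapshot builds append the build date to the configured version.
        final String stamp = new SimpleDateFormat("yyyyMMdd").format(new Date());
        return buildVer + "-" + stamp; // e.g. "1.3.0-20131218"
    }
}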
From: <tho...@us...> - 2013-12-18 21:04:19
Revision: 7679 http://bigdata.svn.sourceforge.net/bigdata/?rev=7679&view=rev
Author: thompsonbry
Date: 2013-12-18 21:04:13 +0000 (Wed, 18 Dec 2013)
Log Message:
-----------
Bigdata 1.3.0 release (HA)
Added Paths:
-----------
tags/BIGDATA_RELEASE_1_3_0/
From: <tho...@us...> - 2013-12-18 21:00:49
Revision: 7678 http://bigdata.svn.sourceforge.net/bigdata/?rev=7678&view=rev
Author: thompsonbry
Date: 2013-12-18 21:00:42 +0000 (Wed, 18 Dec 2013)
Log Message:
-----------
bumped version number. set snapshot=false for release.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/build.properties
Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.properties 2013-12-18 19:07:42 UTC (rev 7677)
+++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2013-12-18 21:00:42 UTC (rev 7678)
@@ -84,12 +84,12 @@
 release.dir=ant-release
 
 # The build version (note: 0.82b -> 0.82.0); 0.83.2 is followed by 1.0.0
-build.ver=1.2.3
+build.ver=1.3.0
 build.ver.osgi=1.0
 
 # Set true to do a snapshot build. This changes the value of ${version} to 
 # include the date.
-snapshot=true
+snapshot=false
 
 # Javadoc build may be disabled using this property. The javadoc target will
 # not be executed unless this property is defined (its value does not matter).
From: <tho...@us...> - 2013-12-18 19:07:49
Revision: 7677 http://bigdata.svn.sourceforge.net/bigdata/?rev=7677&view=rev
Author: thompsonbry
Date: 2013-12-18 19:07:42 +0000 (Wed, 18 Dec 2013)
Log Message:
-----------
javadoc
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java 2013-12-18 17:28:28 UTC (rev 7676)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java 2013-12-18 19:07:42 UTC (rev 7677)
@@ -42,6 +42,10 @@
  * of a quorum of 3. The quorum will not meet for these unit tests.
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ * 
+ *         TODO Add test to verify that we do not permit a double-start of a
+ *         service (correctly fails, reporting that the service is already
+ *         running).
  */
 public class TestHAJournalServer extends AbstractHA3JournalServerTestCase {
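One common way to implement the double-start check described in the TODO is an exclusive file lock taken in the service directory at startup. A minimal sketch, assuming a hypothetical lock file name; this is not bigdata's implementation:

import java.io.File;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;

public class SingleInstanceGuardSketch {

    /** Acquire the lock, or fail if another process already holds it. */
    static FileLock acquireOrDie(final File serviceDir) throws Exception {
        final RandomAccessFile raf = new RandomAccessFile(
                new File(serviceDir, ".lock"), "rw");
        final FileLock lock = raf.getChannel().tryLock();
        if (lock == null) {
            raf.close();
            throw new IllegalStateException(
                    "Service already running in: " + serviceDir);
        }
        return lock; // hold for the life of the process
    }
}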