From: <tho...@us...> - 2013-08-20 18:54:35

Revision: 7301
http://bigdata.svn.sourceforge.net/bigdata/?rev=7301&view=rev
Author: thompsonbry
Date: 2013-08-20 18:54:28 +0000 (Tue, 20 Aug 2013)

Log Message:
-----------
Added the GAS test suite into CI.

Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/TestAll.java

Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/TestAll.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/TestAll.java  2013-08-20 18:30:27 UTC (rev 7300)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/TestAll.java  2013-08-20 18:54:28 UTC (rev 7301)
@@ -85,7 +85,10 @@
         // test the bulk data loader : @todo use proxy tests and move into per-store suites?
         suite.addTest( com.bigdata.rdf.load.TestAll.suite() );
-
+
+        // test RDF graph mining/analytics
+        suite.addTest( com.bigdata.rdf.graph.TestAll.suite() );
+
         return suite;
     }
From: <tho...@us...> - 2013-08-20 18:30:38

Revision: 7300
http://bigdata.svn.sourceforge.net/bigdata/?rev=7300&view=rev
Author: thompsonbry
Date: 2013-08-20 18:30:27 +0000 (Tue, 20 Aug 2013)

Log Message:
-----------
Initial commit of GAS-based graph mining support. This commit includes BFS and SSSP and uses a compact frontier. There are some basic unit tests for GATHER, BFS, and SSSP. Processing is not optimized: the GATHER, SCATTER, and APPLY phases are single-threaded. One of the next steps will be to use a thread pool to parallelize those operations. See #629

Added Paths:
-----------
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/EdgesEnum.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/Factory.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/GASUtil.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/IGASContext.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/IGASEngine.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/IGASOptions.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/IGASProgram.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/IReducer.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/analytics/
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/analytics/BFS.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/analytics/SSSP.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/GASEngine.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/GASGraphUtil.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/AbstractGraphTestCase.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/TestAll.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/analytics/
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/analytics/TestAll.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/analytics/TestBFS.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/analytics/TestBFS0.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/analytics/TestSSSP.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/data/
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/data/smallGraph.ttl
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/PerformanceTest.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/TestAll.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/TestGather.java
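Aside, for orientation before the sources below: a minimal sketch of how an analytic from this commit is driven. The store reference kb and the starting-vertex IV start are assumed to exist (both hypothetical here), and BFS.VS is package-private in this commit, so same-package access is assumed; only the GASEngine constructor and the init()/call()/getGASContext() methods introduced below are used.

    // Hedged sketch: kb and start are assumed; exception handling omitted.
    final IGASEngine<BFS.VS, BFS.ES, Void> engine =
            new GASEngine<BFS.VS, BFS.ES, Void>(kb, new BFS());

    engine.init(start);   // seed the initial frontier (invokes BFS.init()).
    engine.call();        // run doRound() until the frontier is empty.

    // Each visited vertex now carries its traversal depth.
    final int d = engine.getGASContext().getState(start).depth(); // 0 for start.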
Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/EdgesEnum.java (new file, rev 7300)
===================================================================
package com.bigdata.rdf.graph;

/**
 * Typesafe enumeration used to specify whether a GATHER or SCATTER phase is
 * applied to the in-edges, out-edges, both, or not run.
 *
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 */
public enum EdgesEnum {

    /** The phase is not run. */
    NoEdges(false/* inEdges */, false/* outEdges */),

    /** The phase is applied only to the in-edges. */
    InEdges(true/* inEdges */, false/* outEdges */),

    /** The phase is applied only to the out-edges. */
    OutEdges(false/* inEdges */, true/* outEdges */),

    /** The phase is applied to all edges (both in-edges and out-edges). */
    AllEdges(true/* inEdges */, true/* outEdges */);

    private EdgesEnum(final boolean inEdges, final boolean outEdges) {
        this.inEdges = inEdges;
        this.outEdges = outEdges;
    }

    private final boolean inEdges;
    private final boolean outEdges;

    /** Return <code>true</code> iff the in-edges will be visited. */
    public boolean doInEdges() {
        return inEdges;
    }

    /** Return <code>true</code> iff the out-edges will be visited. */
    public boolean doOutEdges() {
        return outEdges;
    }

    /**
     * Return <code>true</code> iff the value is either {@link #InEdges} or
     * {@link #OutEdges}.
     */
    public boolean isDirected() {
        switch (this) {
        case NoEdges:
            /*
             * Note: NoEdges is neither directed nor non-directed regardless of
             * the value returned here. The caller should not be inquiring
             * about the directedness of the GATHER or SCATTER unless they
             * will be executing that stage.
             */
            return false;
        case AllEdges:
            return false;
        case InEdges:
            return true;
        case OutEdges:
            return true;
        default:
            throw new UnsupportedOperationException();
        }
    }

}

Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/Factory.java (new file, rev 7300)
===================================================================
package com.bigdata.rdf.graph;

/**
 * Factory for initializing a vertex state or edge state object given the
 * vertex or edge.
 *
 * @param <V>
 *            The vertex or the edge.
 * @param <T>
 *            The object that will model the state of that vertex or edge in
 *            the computation.
 *
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 */
public abstract class Factory<V, T> {

    /**
     * Factory pattern.
     *
     * @param value
     *            The value that provides the scope for the object.
     *
     * @return The factory generated object.
     */
    abstract public T initialValue(V value);

}

Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/GASUtil.java (new file, rev 7300)
===================================================================
package com.bigdata.rdf.graph;

import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.spo.ISPO;

/**
 * Utility class for operations on the public interfaces.
 *
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 */
public class GASUtil {

//    private static final Logger log = Logger.getLogger(GASUtil.class);

    /**
     * Return the other end of a link.
     *
     * @param u
     *            One end of the link.
     * @param e
     *            The link.
     *
     * @return The other end of the link.
     *
     * FIXME We can optimize this to use reference testing if we are careful
     * in the GATHER and SCATTER implementations to always use the {@link IV}
     * values on the {@link ISPO} object that is exposed to the
     * {@link IGASProgram}.
     */
    @SuppressWarnings("rawtypes")
    public static IV getOtherVertex(final IV u, final ISPO e) {

        if (e.s().equals(u))
            return e.o();

        return e.s();

    }

}
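To make getOtherVertex() concrete, an illustrative trace (s, o, and e are assumed IV/ISPO instances; names are hypothetical):

    // For an edge e = (s, p, o):
    // GASUtil.getOtherVertex(s, e) returns o  (standing at the subject).
    // GASUtil.getOtherVertex(o, e) returns s  (standing at the object).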
Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/IGASContext.java (new file, rev 7300)
===================================================================
package com.bigdata.rdf.graph;

import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.spo.ISPO;

/**
 * Execution context for an {@link IGASProgram}. This is distinct from the
 * {@link IGASEngine} so we can support distributed evaluation.
 *
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 *
 * @param <VS>
 *            The generic type for the per-vertex state. This is scoped to the
 *            computation of the {@link IGASProgram}.
 * @param <ES>
 *            The generic type for the per-edge state. This is scoped to the
 *            computation of the {@link IGASProgram}.
 * @param <ST>
 *            The generic type for the SUM. This is often directly related to
 *            the generic type for the per-edge state, but that is not always
 *            true. The SUM type is scoped to the GATHER + SUM operation (NOT
 *            the computation).
 */
@SuppressWarnings("rawtypes")
public interface IGASContext<VS, ES, ST> {

    /**
     * Schedule a vertex for execution.
     *
     * @param v
     *            The vertex.
     */
    void schedule(IV v);

    /**
     * Return the current evaluation round (origin ZERO).
     */
    int round();

    /**
     * Get the state for the vertex using the appropriate factory. If this is
     * the first visit for that vertex, then the state is initialized using
     * the factory. Otherwise the existing state is returned.
     *
     * @param v
     *            The vertex.
     *
     * @return The state for that vertex.
     *
     * @see IGASProgram#getVertexStateFactory()
     */
    VS getState(IV v);

    /**
     * Get the state for the edge using the appropriate factory. If this is
     * the first visit for that edge, then the state is initialized using the
     * factory. Otherwise the existing state is returned.
     *
     * @param e
     *            The edge.
     *
     * @return The state for that edge.
     *
     * @see IGASProgram#getEdgeStateFactory()
     */
    ES getState(ISPO e);

    /**
     * Compute a reduction over the vertex state table (all vertices that
     * have had their vertex state materialized).
     *
     * @param op
     *            The reduction operation.
     *
     * @return The reduction.
     */
    <T> T reduce(IReducer<VS, ES, ST, T> op);

}

Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/IGASEngine.java (new file, rev 7300)
===================================================================
package com.bigdata.rdf.graph;

import java.util.concurrent.Callable;

import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.store.AbstractTripleStore;

/**
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 * @param <VS>
 *            The generic type for the per-vertex state. This is scoped to the
 *            computation of the {@link IGASProgram}.
 * @param <ES>
 *            The generic type for the per-edge state. This is scoped to the
 *            computation of the {@link IGASProgram}.
 * @param <ST>
 *            The generic type for the SUM. This is often directly related to
 *            the generic type for the per-edge state, but that is not always
 *            true. The SUM type is scoped to the GATHER + SUM operation (NOT
 *            the computation).
 *
 * FIXME This should be refactored to allow a singleton for the
 * {@link IGASEngine} (for a server process, much like a QueryEngine) and then
 * to create an {@link IGASContext} to execute an {@link IGASProgram}. This
 * would allow us to reuse resources within the {@link IGASEngine}.
 */
public interface IGASEngine<VS, ES, ST> extends Callable<Void> {

    /**
     * Return the graph.
     */
    AbstractTripleStore getKB();

    /**
     * Return the program that is being evaluated.
     */
    IGASProgram<VS, ES, ST> getGASProgram();

    /**
     * The execution context for the {@link IGASEngine}.
     */
    IGASContext<VS, ES, ST> getGASContext();

    /**
     * {@link #reset()} the computation state and populate the initial
     * frontier.
     *
     * @param v
     *            One or more vertices that will be included in the initial
     *            frontier.
     *
     * @throws IllegalArgumentException
     *             if no vertices are specified.
     */
    void init(@SuppressWarnings("rawtypes") IV... v);

    /**
     * Discard computation state (the frontier, vertex state, and edge state)
     * and reset the round counter.
     * <p>
     * Note: The graph is NOT part of the computation and is not discarded by
     * this method.
     */
    void reset();

    /**
     * Execute one iteration.
     *
     * @return true iff the new frontier is empty.
     */
    boolean doRound();

}

Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/IGASOptions.java (new file, rev 7300)
===================================================================
package com.bigdata.rdf.graph;

import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.spo.ISPO;

/**
 * Interface for options that are understood by the {@link IGASEngine} and
 * which may be declared by the {@link IGASProgram}.
 *
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 *
 * TODO Option to materialize Literals (or to declare the set of literals of
 * interest). How do we do a gather of the attribute values for a vertex?
 * That would be the SPO index for clustered access, so this should be done at
 * the same time that we SCATTER over out-edges, which implies that the
 * SCATTER gets pushed into the APPLY, which makes sense.
 *
 * TODO Option for scalable state (HTree or BTree with buffered eviction as
 * per the DISTINCT filter).
 *
 * TODO Option to materialize the VS for the target vertex in SCATTER.
 */
public interface IGASOptions<VS, ES> {

    /**
     * Return the set of edges to which the GATHER is applied -or-
     * {@link EdgesEnum#NoEdges} to skip the GATHER phase.
     */
    EdgesEnum getGatherEdges();

    /**
     * Return the set of edges to which the SCATTER is applied -or-
     * {@link EdgesEnum#NoEdges} to skip the SCATTER phase.
     */
    EdgesEnum getScatterEdges();

    /**
     * Return a factory for vertex state objects.
     * <p>
     * Note: A <code>null</code> value may not be allowed in the visited
     * vertex map, so if the algorithm does not use vertex state, then the
     * factory should return a singleton instance each time it is invoked.
     */
    @SuppressWarnings("rawtypes")
    Factory<IV, VS> getVertexStateFactory();

    /**
     * Return a factory for edge state objects -or- <code>null</code> if the
     * {@link IGASProgram} does not use edge state (in which case the edge
     * state will not be allocated or maintained).
     */
    Factory<ISPO, ES> getEdgeStateFactory();

}

Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/IGASProgram.java (new file, rev 7300)
===================================================================
package com.bigdata.rdf.graph;

import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.spo.ISPO;

/**
 * Abstract interface for GAS programs.
 *
 * @param <VS>
 *            The generic type for the per-vertex state. This is scoped to the
 *            computation of the {@link IGASProgram}.
 * @param <ES>
 *            The generic type for the per-edge state. This is scoped to the
 *            computation of the {@link IGASProgram}.
 * @param <ST>
 *            The generic type for the SUM. This is often directly related to
 *            the generic type for the per-edge state, but that is not always
 *            true. The SUM type is scoped to the GATHER + SUM operation (NOT
 *            the computation).
 *
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 */
@SuppressWarnings("rawtypes")
public interface IGASProgram<VS, ES, ST> extends IGASOptions<VS, ES> {

    /*
     * TODO Unfortunately this pattern of hiding our more complex interfaces
     * can not be made to work without creating wrapper objects that implement
     * the derived interface, even though we would just like to use it as a
     * marker interface. It might be workable if we put this under the IV and
     * ISPO interfaces (as simpler interfaces without generic types).
     */

//    /**
//     * A shorthand for the {@link IV} interface that cleans up the generic
//     * type warnings. An {@link IV} corresponds to a vertex of the graph or
//     * the value of an attribute. {@link IV}s may be materialized or not.
//     * For efficiency, it is better to operate without materialization of
//     * the corresponding RDF {@link Value}. Many {@link IV}s are
//     * <em>inline</em> and can be immediately interpreted as if they were
//     * materialized RDF {@link Value}s - for example, this is true by
//     * default for all <code>xsd</code> numeric datatypes. It may also be
//     * true of other kinds of {@link Value}s depending on how the KB was
//     * configured.
//     *
//     * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
//     */
//    @SuppressWarnings("rawtypes")
//    private interface IV extends com.bigdata.rdf.internal.IV {
//
//    }
//
//    /**
//     * An edge is comprised of a Subject (s), Predicate (p), and Object (o).
//     * Depending on the KB configuration, there may also be a Context (c)
//     * position on the edge - when present, the Context supports the concept
//     * of SPARQL named graphs.
//     * <dl>
//     * <dt>Subject</dt>
//     * <dd>The Subject is either a {@link URI} or a {@link BNode}.</dd>
//     * <dt>Predicate</dt>
//     * <dd>The Predicate is always a {@link URI}.</dd>
//     * <dt>Object</dt>
//     * <dd>The Object is either a {@link URI} (in which case the "edge" is a
//     * link) or a {@link Literal} (in which case the edge is a property
//     * value).</dd>
//     * <dt>Context</dt>
//     * <dd>The Context is either a {@link URI} or a {@link BNode}.</dd>
//     * </dl>
//     * Note that the Subject, Predicate, Object, and Context will be
//     * {@link IV} instances and hence might or might not be materialized RDF
//     * {@link Value}s and might or might not be <em>inline</em> and hence
//     * directly inspectable as if they were materialized RDF {@link Value}s.
//     *
//     * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
//     */
//    private interface ISPO extends com.bigdata.rdf.spo.ISPO {
//
//        @Override
//        IV s();
//
//        @Override
//        IV p();
//
//        @Override
//        IV o();
//
//        @Override
//        IV c();
//
//    }

    /**
     * Callback to initialize the state for each vertex in the initial
     * frontier before the first iteration. A typical use case is to set the
     * distance of the starting vertex to ZERO (0).
     *
     * @param u
     *            The vertex.
     */
    void init(IGASContext<VS, ES, ST> ctx, IV u);

    /**
     * GATHER is a map/reduce over the edges of the vertex. The SUM provides
     * pair-wise reduction over the edges visited by the GATHER.
     *
     * @param u
     *            The vertex for which the gather is being performed. The
     *            gather will be invoked for each edge incident on
     *            <code>u</code> (as specified by {@link #getGatherEdges()}).
     * @param e
     *            An edge (s,p,o).
     *
     * @return The new edge state accumulant.
     *
     * FIXME DESIGN: The problem with pushing the ISPO onto the ES is that we
     * are then forced to maintain edge state (for the purposes of accessing
     * those ISPO references) even if the algorithm does not require any
     * memory for the edge state!
     * <p>
     * Note: by lazily resolving the vertex and/or edge state in the GAS
     * callback methods we avoid eagerly materializing data that we do not
     * need.
     * <p>
     * Note: However, this might cause problems with a powergraph style
     * decomposition onto a cluster since the state needs to be communicated
     * up front if it will be required by the gather() for the edge.
     * <p>
     * Note: The state associated with the source/target vertex and the edge
     * should all be immutable for the GATHER. The vertex state should only
     * be mutable for the APPLY(). The target vertex state and/or edge state
     * MAY be mutable for the SCATTER, but that depends on the algorithm. How
     * can we get these constraints into the API?
     *
     * TODO If we gather/scatter over ALL edges, then do we need to pass
     * through a parameter so the caller can figure out which direction the
     * edge points in? (Alternatively, pass in the vertex for which the
     * gather is being performed and the caller can reference test both [s]
     * and [o] to see which one is the vertex on which the gather is invoked
     * and which one is the remote vertex.)
     */
    ST gather(IGASContext<VS, ES, ST> ctx, IV u, ISPO e);

    /**
     * SUM is a pair-wise reduction that is applied during the GATHER.
     *
     * @param left
     *            An edge state accumulant.
     * @param right
     *            Another edge state accumulant.
     *
     * @return Their "sum".
     *
     * TODO DESIGN: Rather than pair-wise reduction, why not use vectored
     * reduction? That way we could use an array of primitives as well as
     * objects.
     */
    ST sum(ST left, ST right);

    /**
     * Apply the reduced aggregation computed by GATHER + SUM to the vertex.
     *
     * @param u
     *            The vertex.
     * @param sum
     *            The aggregated accumulant across the edges as computed by
     *            GATHER and SUM -or- <code>null</code> if there is no
     *            accumulant (this will happen if the GATHER did not find any
     *            edges to visit).
     *
     * @return The new state for the vertex.
     *
     * TODO How to indicate if there is no state change? Return the same
     * object? This only matters with secondary storage for the vertex state.
     * An alternative is to side-effect the vertex state, but then we can not
     * manage the barriers (BFS versus asynchronous). Except for indicating
     * that the state is dirty, we do not need a return value. [The pattern
     * appears to be that a vertex leaves a marker on its vertex state object
     * indicating whether or not it was changed and then tests that state
     * when deciding whether or not to scatter.]
     *
     * TODO There could be a big win here if we are able to detect when a
     * newly initialized vertex state does not "escape" and simply not store
     * it. For some graphs, the vertexState map grows very rapidly when
     * compared to either the frontier or the set of states that have been in
     * the frontier during the computation.
     */
    VS apply(IGASContext<VS, ES, ST> ctx, IV u, ST sum);

    /**
     * Return <code>true</code> iff the vertex should run its SCATTER phase.
     * This may be used to avoid visiting the edges if it is known (e.g.,
     * based on the APPLY) that the vertex has not changed. This can save a
     * substantial amount of effort.
     *
     * @param ctx
     * @param u
     *            The vertex.
     * @return <code>true</code> iff the vertex should scatter.
     */
    boolean isChanged(IGASContext<VS, ES, ST> ctx, IV u);

    /**
     * @param ctx
     * @param u
     *            The vertex for which the scatter is being performed.
     * @param e
     *            The edge.
     */
    void scatter(IGASContext<VS, ES, ST> ctx, IV u, ISPO e);

}
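To make the contract above concrete, here is a hedged sketch of what one round of evaluation amounts to for a single vertex u with gather edges e1..e3 (program, ctx, e1..e3, and outEdge are illustrative names; the actual engine loop appears in GASEngine.java below):

    // Conceptual expansion of one round for vertex u (illustrative only):
    ST acc = program.gather(ctx, u, e1);
    acc = program.sum(acc, program.gather(ctx, u, e2));
    acc = program.sum(acc, program.gather(ctx, u, e3));
    program.apply(ctx, u, acc);           // fold the accumulant into the vertex state.
    if (program.isChanged(ctx, u)) {
        program.scatter(ctx, u, outEdge); // repeated per scatter edge; may schedule vertices.
    }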
Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/IReducer.java (new file, rev 7300)
===================================================================
/**

Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.

Contact:
    SYSTAP, LLC
    4501 Tower Road
    Greensboro, NC 27410
    lic...@bi...

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
 * Created on Jan 16, 2008
 */
package com.bigdata.rdf.graph;

import com.bigdata.rdf.internal.IV;

/**
 * An interface for computing reductions over the vertices of a graph.
 *
 * @param <T>
 *            The type of the result.
 *
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 * @version $Id: IResultHandler.java 2265 2009-10-26 12:51:06Z thompsonbry $
 */
public interface IReducer<VS, ES, ST, T> {

    /**
     * Method is invoked for each result and is responsible for combining the
     * results in whatever manner is meaningful for the procedure.
     * Implementations of this method MUST be <strong>thread-safe</strong>.
     *
     * @param result
     *            The result from applying the procedure to a single index
     *            partition.
     */
    public void visit(IGASContext<VS, ES, ST> ctx, @SuppressWarnings("rawtypes") IV u);

    /**
     * Return the aggregated results as an implementation dependent object.
     */
    public T get();

}
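As a usage sketch (hypothetical; a ctx of type IGASContext<BFS.VS, BFS.ES, Void> is assumed to be available after a run of the BFS class defined just below), a reducer that counts all materialized vertex states can be passed to IGASContext.reduce():

    final IReducer<BFS.VS, BFS.ES, Void, Integer> countVisited =
            new IReducer<BFS.VS, BFS.ES, Void, Integer>() {

        // Thread-safe counter, as required by the visit() contract.
        private final java.util.concurrent.atomic.AtomicInteger n =
                new java.util.concurrent.atomic.AtomicInteger();

        @Override
        public void visit(final IGASContext<BFS.VS, BFS.ES, Void> ctx,
                @SuppressWarnings("rawtypes") final IV u) {
            n.incrementAndGet();
        }

        @Override
        public Integer get() {
            return n.get();
        }

    };

    final int visited = ctx.reduce(countVisited);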
Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/analytics/BFS.java (new file, rev 7300)
===================================================================
package com.bigdata.rdf.graph.analytics;

import com.bigdata.rdf.graph.EdgesEnum;
import com.bigdata.rdf.graph.Factory;
import com.bigdata.rdf.graph.IGASContext;
import com.bigdata.rdf.graph.IGASProgram;
import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.spo.ISPO;

/**
 * Breadth First Search (BFS) is an iterative graph traversal primitive. The
 * frontier is expanded iteratively until no new vertices are discovered. Each
 * visited vertex is marked with the round (origin ZERO) in which it was
 * visited. This is its distance from the initial frontier.
 *
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 */
@SuppressWarnings("rawtypes")
public class BFS implements IGASProgram<BFS.VS, BFS.ES, Void> {

    static class VS {

        /**
         * <code>-1</code> until visited. When visited, set to the current
         * round in order to assign each vertex its traversal depth.
         */
        private int depth = -1;

        /**
         * The depth at which this vertex was first visited (origin ZERO) and
         * <code>-1</code> if the vertex has not been visited.
         */
        public int depth() {
            synchronized (this) {
                return depth;
            }
        }

        /**
         * Note: This marks the vertex at the current traversal depth.
         * <p>
         * Note: It is possible that the same vertex may be visited multiple
         * times in a given expansion (from one or more source vertices that
         * all target the same destination vertex).
         *
         * @return <code>true</code> if the vertex was visited for the first
         *         time in this round and the calling thread is the thread
         *         that first visited the vertex (this helps to avoid
         *         multiple scheduling of a vertex).
         */
        public boolean visit(final int depth) {
            synchronized (this) {
                if (this.depth == -1) {
                    this.depth = depth;
                    return true;
                }
                return false;
            }
        }

        @Override
        public String toString() {
            return "{depth=" + depth() + "}";
        }

    } // class VS

    /**
     * Edge state is not used.
     */
    static class ES {

    }

    private static final Factory<IV, BFS.VS> vertexStateFactory = new Factory<IV, BFS.VS>() {

        @Override
        public BFS.VS initialValue(final IV value) {

            return new VS();

        }

    };

    @Override
    public Factory<IV, BFS.VS> getVertexStateFactory() {

        return vertexStateFactory;

    }

    @Override
    public Factory<ISPO, BFS.ES> getEdgeStateFactory() {

        return null;

    }

    @Override
    public EdgesEnum getGatherEdges() {

        return EdgesEnum.NoEdges;

    }

    @Override
    public EdgesEnum getScatterEdges() {

        return EdgesEnum.OutEdges;

    }

    /**
     * Mark the starting vertex as visited at depth ZERO (0).
     */
    @Override
    public void init(IGASContext<BFS.VS, BFS.ES, Void> ctx, IV u) {

        ctx.getState(u).visit(0);

    }

    /**
     * Not used (BFS does not run a GATHER phase).
     */
    @Override
    public Void gather(IGASContext<BFS.VS, BFS.ES, Void> ctx, IV u, ISPO e) {
        throw new UnsupportedOperationException();
    }

    /**
     * Not used.
     */
    @Override
    public Void sum(Void left, Void right) {
        throw new UnsupportedOperationException();
    }

    /**
     * NOP.
     */
    @Override
    public BFS.VS apply(final IGASContext<BFS.VS, BFS.ES, Void> ctx,
            final IV u, final Void sum) {

        return null;

    }

    /**
     * Returns <code>true</code> (every vertex in the frontier scatters).
     */
    @Override
    public boolean isChanged(IGASContext<VS, ES, Void> ctx, IV u) {

        return true;

    }

    /**
     * The remote vertex is scheduled for activation unless it has already
     * been visited.
     * <p>
     * Note: We are scattering to out-edges. Therefore, this vertex is
     * {@link ISPO#s()}. The remote vertex is {@link ISPO#o()}.
     */
    @Override
    public void scatter(final IGASContext<BFS.VS, BFS.ES, Void> ctx,
            final IV u, final ISPO e) {

        // remote vertex state.
        final VS otherState = ctx.getState(e.o());

        // visit.
        if (otherState.visit(ctx.round() + 1)) {

            /*
             * This is the first visit for the remote vertex. Add it to the
             * schedule for the next iteration.
             */

            ctx.schedule(e.o());

        }

    }

}
Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/analytics/SSSP.java (new file, rev 7300)
===================================================================
package com.bigdata.rdf.graph.analytics;

import org.apache.log4j.Logger;

import com.bigdata.rdf.graph.EdgesEnum;
import com.bigdata.rdf.graph.Factory;
import com.bigdata.rdf.graph.GASUtil;
import com.bigdata.rdf.graph.IGASContext;
import com.bigdata.rdf.graph.IGASProgram;
import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.spo.ISPO;

/**
 * SSSP (Single Source, Shortest Path). This analytic computes the shortest
 * path to each vertex in the graph starting from the given vertex. Only
 * connected vertices are visited by this implementation (the frontier never
 * leaves the connected component in which the starting vertex is located).
 *
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 *
 * TODO There is no reason to do a gather on the first round. Add isGather()?
 * (parallel to isChanged() for scatter?)
 *
 * TODO Add reducer pattern for finding the maximum degree vertex.
 *
 * TODO Add parameter for directed versus undirected SSSP. When undirected,
 * the gather and scatter are for AllEdges. Otherwise, gather on in-edges and
 * scatter on out-edges. Also, we need to use a getOtherVertex(e) method to
 * figure out the other vertex when using undirected scatter/gather. Add unit
 * test for undirected.
 */
@SuppressWarnings("rawtypes")
public class SSSP implements IGASProgram<SSSP.VS, SSSP.ES, Integer/* dist */> {

    private static final Logger log = Logger.getLogger(SSSP.class);

    /**
     * The length of an edge.
     *
     * FIXME This should be modified to use link weights with RDR. We need a
     * pattern to get the link attributes materialized with the {@link ISPO}
     * for the link. That could be done using a read-ahead filter on the
     * striterator if the link weights are always clustered with the ground
     * triple.
     *
     * When we make this change, the distance should be of the same type as
     * the link weight or generalized as <code>double</code>.
     */
    private final static int EDGE_LENGTH = 1;

    static class VS {

        /**
         * The minimum observed distance (in hops) from the source to this
         * vertex and initially {@link Integer#MAX_VALUE}. When this value is
         * modified, the {@link #changed} flag is set as a side-effect.
         */
        private int dist = Integer.MAX_VALUE;

        private boolean changed = false;

//        /**
//         * Set the distance for the vertex to ZERO. This is done for the
//         * starting vertex.
//         */
//        public void zero() {
//            synchronized (this) {
//                dist = 0;
//                changed = true;
//            }
//        }

        /**
         * Return <code>true</code> if the {@link #dist()} was updated by the
         * last APPLY.
         */
        public boolean isChanged() {
            synchronized (this) {
                return changed;
            }
        }

        /**
         * The current estimate of the minimum distance from the starting
         * vertex to this vertex and {@link Integer#MAX_VALUE} until this
         * vertex is visited.
         */
        public int dist() {
            synchronized (this) {
                return dist;
            }
        }

        @Override
        public String toString() {

            return "{dist=" + dist() + ", changed=" + isChanged() + "}";

        }

    } // class VS

    /**
     * Edge state is not used.
     */
    static class ES {

    }

    private static final Factory<IV, SSSP.VS> vertexStateFactory = new Factory<IV, SSSP.VS>() {

        @Override
        public SSSP.VS initialValue(final IV value) {

            return new VS();

        }

    };

    @Override
    public Factory<IV, SSSP.VS> getVertexStateFactory() {

        return vertexStateFactory;

    }

    @Override
    public Factory<ISPO, SSSP.ES> getEdgeStateFactory() {

        return null;

    }

    @Override
    public EdgesEnum getGatherEdges() {

        return EdgesEnum.InEdges;

    }

    @Override
    public EdgesEnum getScatterEdges() {

        return EdgesEnum.OutEdges;

    }

    /**
     * Set the {@link VS#dist()} to ZERO (0).
     * <p>
     * {@inheritDoc}
     */
    @Override
    public void init(final IGASContext<SSSP.VS, SSSP.ES, Integer> ctx,
            final IV u) {

        final VS us = ctx.getState(u);

        synchronized (us) {

            // Set distance to zero for starting vertex.
            us.dist = 0;

            // Must be true to trigger scatter in the 1st round!
            us.changed = true;

        }

    }

    /**
     * <code>src.dist + edge_length (1)</code>
     * <p>
     * {@inheritDoc}
     */
    @Override
    public Integer gather(final IGASContext<SSSP.VS, SSSP.ES, Integer> ctx,
            final IV u, final ISPO e) {

//        assert e.o().equals(u);

        final VS src = ctx.getState(e.s());

        final int d = src.dist();

        if (d == Integer.MAX_VALUE) {

            // Note: Avoids overflow (wrapping around to a negative value).
            return d;

        }

        return d + EDGE_LENGTH;

    }

    /**
     * MIN
     */
    @Override
    public Integer sum(final Integer left, final Integer right) {

        return Math.min(left, right);

    }

    /**
     * Update the {@link VS#dist()} and {@link VS#isChanged()} based on the
     * new <i>sum</i>.
     * <p>
     * {@inheritDoc}
     */
    @Override
    public SSSP.VS apply(IGASContext<SSSP.VS, SSSP.ES, Integer> ctx,
            final IV u, final Integer sum) {

        if (sum != null) {

//            log.error("u=" + u + ", us=" + us + ", sum=" + sum);

            // Get the state for that vertex.
            final SSSP.VS us = ctx.getState(u);

            final int minDist = sum;

            synchronized (us) {
                us.changed = false;
                if (us.dist > minDist) {
                    us.dist = minDist;
                    us.changed = true;
                    if (log.isDebugEnabled())
                        log.debug("u=" + u + ", us=" + us + ", minDist=" + minDist);
                    return us;
                }
            }
        }

        // No change.
        return null;

    }

    @Override
    public boolean isChanged(final IGASContext<SSSP.VS, SSSP.ES, Integer> ctx,
            final IV u) {

        return ctx.getState(u).isChanged();

    }

    /**
     * The remote vertex is scheduled if this vertex is changed.
     * <p>
     * Note: We are scattering to out-edges. Therefore, this vertex is
     * {@link ISPO#s()}. The remote vertex is {@link ISPO#o()}.
     * <p>
     * {@inheritDoc}
     */
    @Override
    public void scatter(final IGASContext<SSSP.VS, SSSP.ES, Integer> ctx,
            final IV u, final ISPO e) {

        final IV other = GASUtil.getOtherVertex(u, e);

        final VS selfState = ctx.getState(u);

        final VS otherState = ctx.getState(other);

        // last observed distance for the remote vertex.
        final int otherDist = otherState.dist();

        // new distance for the remote vertex.
        final int newDist = selfState.dist() + EDGE_LENGTH;

        if (newDist < otherDist) {

            synchronized (otherState) {
                otherState.dist = newDist;
                otherState.changed = true;
            }

            if (log.isDebugEnabled())
                log.debug("u=" + u + " @ " + selfState.dist()
                        + ", scheduling: " + other + " with newDist=" + newDist);

            // Then add the remote vertex to the next frontier.
            ctx.schedule(e.o());

        }

    }

}
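A small worked trace may help (vertex names v0, v1, v2 are hypothetical): for a chain v0 -> v1 -> v2 with EDGE_LENGTH = 1, starting SSSP from v0, init() sets dist(v0) = 0 and changed = true; the round-0 SCATTER relaxes v1 to dist 1 and schedules it; the round-1 SCATTER relaxes v2 to dist 2; after that no distance improves, the new frontier is empty, and the computation halts with dist = {v0: 0, v1: 1, v2: 2}.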
Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/GASEngine.java (new file, rev 7300)
===================================================================
package com.bigdata.rdf.graph.impl;

import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.log4j.Logger;
import org.eclipse.jetty.util.ConcurrentHashSet;

import com.bigdata.rdf.graph.EdgesEnum;
import com.bigdata.rdf.graph.Factory;
import com.bigdata.rdf.graph.IGASContext;
import com.bigdata.rdf.graph.IGASEngine;
import com.bigdata.rdf.graph.IGASProgram;
import com.bigdata.rdf.graph.IReducer;
import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.model.BigdataValue;
import com.bigdata.rdf.spo.ISPO;
import com.bigdata.rdf.spo.SPOFilter;
import com.bigdata.rdf.store.AbstractTripleStore;
import com.bigdata.relation.accesspath.IElementFilter;
import com.bigdata.striterator.ChunkedStriterator;
import com.bigdata.striterator.EmptyChunkedIterator;
import com.bigdata.striterator.IChunkedIterator;

/**
 * {@link IGASEngine} for dynamic activation of vertices. This implementation
 * maintains a frontier and lazily initializes the vertex state when the
 * vertex is visited for the first time. This is appropriate for algorithms,
 * such as BFS, that use a dynamic frontier.
 *
 * TODO Algorithms that need to visit all vertices in each round (CC, BC, PR)
 * can be more optimally executed by a different implementation strategy. The
 * vertex state should be arranged in a dense map (maybe an array) and
 * presized. For example, this could be done on the first pass when we
 * identify a vertex index for each distinct V in visitation order.
 *
 * TODO Vectored expansion with conditional materialization of attribute
 * values could be achieved using CONSTRUCT. This would force URI
 * materialization as well. If we drop down one level, then we can push in
 * the frontier and avoid the materialization. Or we can just write an
 * operator that accepts a frontier and returns the new frontier and which
 * maintains an internal map containing both the visited vertices, the vertex
 * state, and the edge state.
 *
 * TODO Some computations could be maintained and accelerated. A great
 * example is Shortest Path (as per RDF3X). Reachability queries for a
 * hierarchy can also be maintained and accelerated (again, RDF3X using a
 * ferrari index).
 *
 * TODO Some of the more interesting questions are how to handle dynamic
 * graphs. This is not yet considered at all by this code.
 *
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
 */
@SuppressWarnings("rawtypes")
public class GASEngine<VS, ES, ST> implements IGASEngine<VS, ES, ST>,
        IGASContext<VS, ES, ST> {

    private static final Logger log = Logger.getLogger(GASEngine.class);

    /**
     * Filter visits only edges (filters out attribute values).
     * <p>
     * Note: This filter is pushed down onto the AP and evaluated close to
     * the data.
     *
     * TODO Lift out as static utility class.
     */
    static final IElementFilter<ISPO> edgeOnlyFilter = new SPOFilter<ISPO>() {
        private static final long serialVersionUID = 1L;

        @Override
        public boolean isValid(final Object e) {
            return ((ISPO) e).o().isURI();
        }
    };

    /**
     * The KB (aka graph).
     * <p>
     * Note: This COULD be scale-out with remote indices or running embedded
     * inside of an HA server. However, for scale-out we want to partition the
     * work and the VS/ES so that would imply a different {@link IGASEngine}
     * design.
     */
    private final AbstractTripleStore kb;

    /**
     * The graph analytic to be executed.
     */
    private final IGASProgram<VS, ES, ST> program;

    /**
     * Factory for the vertex state objects.
     */
    private final Factory<IV, VS> vsf;

    /**
     * Factory for the edge state objects.
     */
    private final Factory<ISPO, ES> esf;

    /**
     * The state associated with each visited vertex.
     *
     * TODO Offer scalable backend with high throughput, e.g., using a
     * batched striped lock as per DISTINCT.
     */
    private final ConcurrentMap<IV, VS> vertexState = new ConcurrentHashMap<IV, VS>();

    /**
     * TODO Edge state needs to be configurable. When disabled, leave this as
     * <code>null</code>.
     */
    private final ConcurrentMap<ISPO, ES> edgeState = null;

    /**
     * The set of vertices that were identified in the current iteration.
     */
    @SuppressWarnings("unchecked")
    private final ConcurrentHashSet<IV>[] frontier = new ConcurrentHashSet[2];

    /**
     * The current evaluation round.
     */
    private final AtomicInteger round = new AtomicInteger(0);

    @Override
    public VS getState(final IV v) {

        VS vs = vertexState.get(v);

        if (vs == null) {

            VS old = vertexState.putIfAbsent(v, vs = vsf.initialValue(v));

            if (old != null) {

                // Lost data race.
                vs = old;

            }

        }

        return vs;

    }

    @Override
    public ES getState(final ISPO e) {

        if (edgeState == null)
            return null;

        ES es = edgeState.get(e);

        if (es == null) {

            ES old = edgeState.putIfAbsent(e, es = esf.initialValue(e));

            if (old != null) {

                // Lost data race.
                es = old;

            }

        }

        return es;

    }

    /**
     * The current frontier.
     */
    protected Set<IV> frontier() {

        return frontier[round.get() % 2];

    }

    /**
     * The new frontier - this is populated during the round. At the end of
     * the round, the new frontier replaces the current frontier (this
     * happens when we increment the {@link #round()}). If the current
     * frontier is empty after that replacement, then the traversal is done.
     */
    protected Set<IV> newFrontier() {

        return frontier[(round.get() + 1) % 2];

    }

    @Override
    public int round() {

        return round.get();

    }

    @Override
    public AbstractTripleStore getKB() {
        return kb;
    }

    @Override
    public IGASProgram<VS, ES, ST> getGASProgram() {
        return program;
    }

    @Override
    public IGASContext<VS, ES, ST> getGASContext() {
        return this;
    }

    public GASEngine(final AbstractTripleStore kb,
            final IGASProgram<VS, ES, ST> program) {

        if (kb == null)
            throw new IllegalArgumentException();
        if (program == null)
            throw new IllegalArgumentException();

        this.kb = kb;

        this.program = program;

        this.vsf = program.getVertexStateFactory();

        this.esf = program.getEdgeStateFactory();

        /*
         * TODO set options; setup factory objects; etc.
         *
         * TODO Could dynamically instantiate the appropriate IGASEngine
         * implementation based on those options and either a factory or
         * delegation pattern.
         */

        this.frontier[0] = new ConcurrentHashSet<IV>();

        this.frontier[1] = new ConcurrentHashSet<IV>();

    }

    @Override
    public void init(final IV... vertices) {

        if (vertices == null)
            throw new IllegalArgumentException();

        reset();

        for (IV v : vertices) {

            // Put into the current frontier.
            frontier().add(v);

            /*
             * Callback to initialize the vertex state before the first
             * iteration.
             */
            program.init(getGASContext(), v);

        }

    }

    @Override
    public void reset() {

        round.set(0);

        vertexState.clear();

        if (edgeState != null)
            edgeState.clear();

        frontier().clear();

        newFrontier().clear();

    }

    @Override
    public Void call() throws Exception {

        while (!frontier().isEmpty()) {

            doRound();

        }

        if (log.isInfoEnabled())
            log.info("Done: #rounds=" + round());

        traceState();

        return null;

    }

    /**
     * Trace reports on the details of the frontier, vertex state, and edge
     * state.
     *
     * TODO edgeState is not being traced out.
     */
    @SuppressWarnings("unchecked")
    private void traceState() {

        if (!log.isTraceEnabled())
            return;

        // Get all terms in the frontier.
        final Set<IV<?, ?>> tmp = new HashSet((Collection) frontier());

        // Add all IVs for the vertexState.
        tmp.addAll((Collection) vertexState.keySet());

        // Batch resolve all IVs.
        final Map<IV<?, ?>, BigdataValue> m = kb.getLexiconRelation().getTerms(tmp);

        log.trace("frontier: size=" + frontier().size());

        for (IV v : frontier()) {

            log.trace("frontier: iv=" + v + " (" + m.get(v) + ")");

        }

        log.trace("vertexState: size=" + vertexState.size());

        for (Map.Entry<IV, VS> e : vertexState.entrySet()) {

            final IV v = e.getKey();

            final BigdataValue val = m.get(v);

            log.trace("vertexState: vertex=" + v + " (" + val + "), state="
                    + e.getValue());

        }

    }

    /**
     * {@inheritDoc}
     *
     * TODO This is an Asynchronous implementation. Further, it does not
     * order the vertices to accelerate convergence (unlike GraphChi) and
     * does not attempt to race ahead to accelerate convergence (unlike an
     * asynchronous neural network).
     *
     * TODO There should be an option for property value access during the
     * APPLY (either no property values are required, or some (or all) are
     * required and must optionally be materialized). Likewise, there could
     * be an option to force the materialization of the URIs for the (s,p,o).
     * <p>
     * Property value access is on the SPO index. If we are doing a reverse
     * gather (out-edges) then it will be right there and the Apply should be
     * pushed into the Gather. If we are doing a forward gather (in-edges),
     * then we are reading on OSP and will need to do a separate read on SPO.
     */
    public boolean doRound() {

        if (log.isInfoEnabled())
            log.info("Round=" + round + ", frontierSize=" + frontier().size()
                    + ", vertexStateSize=" + vertexState.size());

        traceState();

        /*
         * This is the new frontier. It is initially empty. All newly
         * discovered vertices are inserted into this frontier.
         */
        newFrontier().clear();

        final EdgesEnum gatherEdges = program.getGatherEdges();
        final EdgesEnum scatterEdges = program.getScatterEdges();

        /*
         * TODO This logic allows us to push down the APPLY into the GATHER
         * or SCATTER depending on some characteristics of the algorithm. Is
         * this worthwhile?
         *
         * TODO The ability to push down the APPLY for AllEdges for the
         * GATHER depends on our using the union of the in-edges and
         * out-edges iterators to visit those edges. That union means that we
         * do not have to preserve the accumulant across the in-edges and
         * out-edges aspects of the GATHER. If this UNION over the iterators
         * causes problems with other optimizations, then it could be
         * discarded. Note that this is not an issue for the SCATTER since we
         * can scatter over the in-edges and out-edges for any given vertex
         * independently (so long as the APPLY is done before the SCATTER -
         * this would not work if we pushed down the APPLY into the SCATTER).
         */
        final boolean pushDownApplyInGather;
        final boolean pushDownApplyInScatter;
        final boolean runApplyStage;

        if (scatterEdges == EdgesEnum.NoEdges) {
            // Do APPLY() in GATHER.
            pushDownApplyInGather = true;
            pushDownApplyInScatter = false;
            runApplyStage = false;
        } else if (gatherEdges == EdgesEnum.NoEdges) {
            // Do APPLY() in SCATTER.
            pushDownApplyInGather = false;
            pushDownApplyInScatter = true;
            runApplyStage = false;
        } else {
            /*
             * Do not push down the APPLY.
             *
             * TODO We could still push down the apply into the GATHER if we
             * are doing both stages.
             */
            pushDownApplyInGather = false;
            pushDownApplyInScatter = false;
            runApplyStage = true;
        }

        gatherEdges(gatherEdges, pushDownApplyInGather);
//        switch (gatherEdges) {
//        case NoEdges:
//            break;
//        case InEdges:
//            gatherEdges(true/*inEdges*/, pushDownApplyInGather);
//            break;
//        case OutEdges:
//            gatherEdges(false/*outEdges*/, pushDownApplyInGather);
//            break;
//        case AllEdges:
//            /*
//             * TODO When doing the GATHER for both in-edges and out-edges,
//             * we should submit two child GATHER tasks so those things run
//             * in parallel. However, look first at how to parallelize within
//             * the GATHER operation (multiple threads over key-range stripes
//             * or threads racing to consume the frontier in order).
//             *
//             * TODO The same applies for the SCATTER stage.
//             */
//            gatherEdges(true/* inEdges */, pushDownApplyInGather);
//            gatherEdges(false/* outEdges */, pushDownApplyInGather);
//            break;
//        default:
//            throw new UnsupportedOperationException(gatherEdges.name());
//        }

        if (runApplyStage) {
            apply();
        }

        scatterEdges(scatterEdges, pushDownApplyInScatter);
//        switch (scatterEdges) {
//        case NoEdges:
//            break;
//        case OutEdges:
//            scatterEdges(false/*inEdges*/, pushDownApplyInScatter);
//            break;
//        case InEdges:
//            scatterEdges(true/*inEdges*/, pushDownApplyInScatter);
//            break;
//        case AllEdges:
//            scatterEdges(true/* inEdges */, pushDownApplyInScatter);
//            scatterEdges(false/* inEdges */, pushDownApplyInScatter);
//            break;
//        default:
//            throw new UnsupportedOperationException(scatterEdges.name());
//        }

        // Swaps old and new frontiers.
        round.incrementAndGet();

        // True if the new frontier is empty.
        return frontier().isEmpty();

    } // doRound()

    /**
     * Generate an ordered frontier to maximize the locality of reference
     * within the indices.
     */
    private IV[] getCompactFrontier() {

        final IV[] f;

        final int size = frontier().size();

        frontier().toArray(f = new IV[size]);

        /*
         * Order for index access. An ordered scan on a B+Tree is 10X faster
         * than random access lookups.
         *
         * Note: This uses natural V order, which is also the index order.
         */
        java.util.Arrays.sort(f);

        return f;

    }

    /**
     * Do APPLY.
     */
    private void apply() {

        final IGASContext<VS, ES, ST> ctx = getGASContext();

        // Compact, ordered frontier. No duplicates!
        final IV[] f = getCompactFrontier();

        for (IV u : f) {

            program.apply(ctx, u, null/* sum */);

        }

    }

    private IChunkedIterator<ISPO> getInEdges(final IV u) {

        // in-edges: OSP / OCSP : [u] is the Object.
        return kb
                .getSPORelation()
                .getAccessPath(null/* s */, null/* p */, u/* o */,
                        null/* c */, edgeOnlyFilter).iterator();

    }

    private IChunkedIterator<ISPO> getOutEdges(final IV u) {

        // out-edges: SPO / SPOC : [u] is the Subject.
        return kb
                .getSPORelation()
                .getAccessPath(u/* s */, null/* p */, null/* o */,
                        null/* c */, edgeOnlyFilter).iterator();

    }

    @SuppressWarnings("unchecked")
    private IChunkedIterator<ISPO> getEdges(final IV u, final EdgesEnum edges) {

        switch (edges) {
        case NoEdges:
            return new EmptyChunkedIterator<ISPO>(null/* keyOrder */);
        case InEdges:
            return getInEdges(u);
        case OutEdges:
            return getOutEdges(u);
        case AllEdges: {
            final IChunkedIterator<ISPO> a = getInEdges(u);
            final IChunkedIterator<ISPO> b = getOutEdges(u);
            final IChunkedIterator<ISPO…

[truncated message content]
From: <tho...@us...> - 2013-08-20 18:26:26

Revision: 7299
http://bigdata.svn.sourceforge.net/bigdata/?rev=7299&view=rev
Author: thompsonbry
Date: 2013-08-20 18:26:17 +0000 (Tue, 20 Aug 2013)

Log Message:
-----------
Fix to com.bigdata.striterator.Striterator.append(). The AppendFilter was not properly implemented, did not close the source iterators, and lacked a unit test.

Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/Appender.java
branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/ChunkedStriterator.java
branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/MergeFilter.java
branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/Striterator.java
branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/striterator/TestAll.java

Added Paths:
-----------
branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/striterator/TestAppendFilter.java

Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/Appender.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/Appender.java  2013-08-19 12:58:17 UTC (rev 7298)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/Appender.java  2013-08-20 18:26:17 UTC (rev 7299)
@@ -42,21 +42,33 @@
 
     private static final long serialVersionUID = 1307691066685808103L;
 
-    private final I itr2;
-
-    public Appender(I itr2) {
+    private final int chunkSize;
+    private final I src2;
 
-        if (itr2 == null)
+    public Appender(final I src2) {
+
+        this(IChunkedIterator.DEFAULT_CHUNK_SIZE, src2);
+
+    }
+
+    public Appender(final int chunkSize, final I src2) {
+
+        if (chunkSize <= 0)
             throw new IllegalArgumentException();
-
-        this.itr2 = itr2;
+        if (src2 == null)
+            throw new IllegalArgumentException();
+
+        this.chunkSize = chunkSize;
+
+        this.src2 = src2;
+
     }
 
-    @SuppressWarnings("unchecked")
-    public I filter(I src) {
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    public I filter(final I src) {
 
-        return (I) new AppendingIterator(src, this);
+        return (I) new AppendingIterator(chunkSize, src, src2);
 
     }
 
@@ -68,32 +80,35 @@
     * @param <I>
     * @param <E>
     */
-    private static class AppendingIterator<I extends Iterator<E>, E> implements
-            Iterator<E> {
+    private static class AppendingIterator<I extends Iterator<E>, E>
+            implements IChunkedIterator<E> {
 
+        private final int chunkSize;
+        private final I src1;
+        private final I src2;
+
         /**
         * Initially set to the value supplied to the ctor. When that source is
-         * exhausted, this is set to {@link Appender#itr2}. When the second
-         * source is exhausted then the total iterator is exhausted.
+         * exhausted, this is set to {@link #src2}. When the second source is
+         * exhausted then the total iterator is exhausted.
         */
        private I src;
 
        private boolean firstSource = true;
 
-        private final Appender<I, E> filter;
+        public AppendingIterator(final int chunkSize, final I src, final I src2) {
 
-        /**
-         * @param src
-         * @param filter
-         */
-        public AppendingIterator(I src, Appender<I, E> filter) {
-
+            this.chunkSize = chunkSize;
+
            this.src = src;
+
+            this.src1 = src;
 
-            this.filter = filter;
-
+            this.src2 = src2;
+
        }
 
+        @Override
        public boolean hasNext() {
 
            if (src == null) {
@@ -109,8 +124,9 @@
                if (firstSource) {
 
                    // start on the 2nd source.
-                    src = filter.itr2;
-
+                    src = src2;
+
+                    firstSource = false;
+
                } else {
 
                    // exhausted.
@@ -122,6 +138,7 @@
 
        }
 
+        @Override
        public E next() {
 
            if (src == null)
@@ -132,6 +149,7 @@
 
        }
 
+        @Override
        public void remove() {
 
            if (src == null)
@@ -141,6 +159,84 @@
 
        }
 
+        @Override
+        public void close() {
+
+            // exhausted.
+            src = null;
+
+            if (src1 instanceof ICloseableIterator) {
+
+                ((ICloseableIterator<?>) src1).close();
+
+            }
+
+            if (src2 instanceof ICloseableIterator) {
+
+                ((ICloseableIterator<?>) src2).close();
+
+            }
+
+        }
+
+        /**
+         * The next chunk of elements in whatever order they were visited by
+         * {@link #next()}.
+         */
+        @Override
+        @SuppressWarnings("unchecked")
+        public E[] nextChunk() {
+
+            if (!hasNext()) {
+
+                throw new NoSuchElementException();
+
+            }
+
+            int n = 0;
+
+            E[] chunk = null;
+
+            while (hasNext() && n < chunkSize) {
+
+                final E t = next();
+
+                if (chunk == null) {
+
+                    /*
+                     * Dynamically instantiate an array of the same component
+                     * type as the objects that we are visiting.
+                     */
+
+                    chunk = (E[]) java.lang.reflect.Array.newInstance(t
+                            .getClass(), chunkSize);
+
+                }
+
+                // add to this chunk.
+                chunk[n++] = t;
+
+            }
+
+            if (n != chunkSize) {
+
+                // make it dense.
+
+                final E[] tmp = (E[]) java.lang.reflect.Array.newInstance(
+//                        chunk[0].getClass(),
+                        chunk.getClass().getComponentType(),//
+                        n);
+
+                System.arraycopy(chunk, 0, tmp, 0, n);
+
+                chunk = tmp;
+
+            }
+
+            return chunk;
+
+        }
 
    }
 
}
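A minimal usage sketch for the repaired filter (hypothetical data; this assumes Appender is visible to the caller, which the diff does not show): appending two iterators yields a single iterator that drains the first source and then the second, and close() now propagates to both sources when they are closeable.

    import java.util.Arrays;
    import java.util.Iterator;

    // Hedged sketch: two plain iterators appended into one.
    final Iterator<String> first = Arrays.asList("a", "b").iterator();
    final Iterator<String> second = Arrays.asList("c").iterator();

    final Iterator<String> appended =
            new Appender<Iterator<String>, String>(second).filter(first);

    while (appended.hasNext()) {
        System.out.println(appended.next()); // prints a, b, c
    }

Note the design choice visible in nextChunk() above: when densifying a short final chunk, the commit switches from chunk[0].getClass() (the runtime type of the first element, which could be a subtype) to chunk.getClass().getComponentType(), keeping the compacted array's component type identical to the original chunk's.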
@@ -122,6 +138,7 @@ } + @Override public E next() { if (src == null) @@ -132,6 +149,7 @@ } + @Override public void remove() { if (src == null) @@ -141,6 +159,84 @@ } + @Override + public void close() { + + // exhausted. + src = null; + + if (src1 instanceof ICloseableIterator) { + + ((ICloseableIterator<?>) src1).close(); + + } + + if (src2 instanceof ICloseableIterator) { + + ((ICloseableIterator<?>) src2).close(); + + } + + } + + /** + * The next chunk of elements in whatever order they were visited by + * {@link #next()}. + */ + @Override + @SuppressWarnings("unchecked") + public E[] nextChunk() { + + if (!hasNext()) { + + throw new NoSuchElementException(); + + } + + int n = 0; + + E[] chunk = null; + + while (hasNext() && n < chunkSize) { + + final E t = next(); + + if (chunk == null) { + + /* + * Dynamically instantiation an array of the same component + * type as the objects that we are visiting. + */ + + chunk = (E[]) java.lang.reflect.Array.newInstance(t + .getClass(), chunkSize); + + } + + // add to this chunk. + chunk[n++] = t; + + } + + if (n != chunkSize) { + + // make it dense. + + final E[] tmp = (E[]) java.lang.reflect.Array.newInstance( +// chunk[0].getClass(), + chunk.getClass().getComponentType(),// + n); + + System.arraycopy(chunk, 0, tmp, 0, n); + + chunk = tmp; + + } + + return chunk; + + } + } } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/ChunkedStriterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/ChunkedStriterator.java 2013-08-19 12:58:17 UTC (rev 7298) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/ChunkedStriterator.java 2013-08-20 18:26:17 UTC (rev 7299) @@ -88,23 +88,26 @@ } + @Override final public E[] nextChunk() { return src.nextChunk(); } + @Override final public void close() { - src.close(); - + ((ICloseableIterator<?>) src).close(); + } /** * Strengthened return type. 
*/ - public IChunkedStriterator<I, E> addFilter(IFilter<I, ?, E> filter) { - + @Override + public IChunkedStriterator<I, E> addFilter(final IFilter<I, ?, E> filter) { + return (IChunkedStriterator<I, E>) super.addFilter(filter); } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/MergeFilter.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/MergeFilter.java 2013-08-19 12:58:17 UTC (rev 7298) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/MergeFilter.java 2013-08-20 18:26:17 UTC (rev 7299) @@ -48,13 +48,13 @@ private final int chunkSize; private final I src2; - public MergeFilter(I src2) { + public MergeFilter(final I src2) { this( IChunkedIterator.DEFAULT_CHUNK_SIZE, src2 ); } - public MergeFilter(int chunkSize, I src2) { + public MergeFilter(final int chunkSize, final I src2) { if (chunkSize <= 0) throw new IllegalArgumentException(); @@ -68,7 +68,7 @@ } - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public I filter(I src) { return (I) new MergedIterator(chunkSize, src, src2); @@ -89,9 +89,9 @@ private final int chunkSize; private final I src1; private final I src2; - - public MergedIterator(int chunkSize, I src1, I src2) { + public MergedIterator(final int chunkSize, final I src1, final I src2) { + this.chunkSize = chunkSize; this.src1 = src1; @@ -100,23 +100,24 @@ } - @SuppressWarnings("unchecked") + @Override public void close() { if(src1 instanceof ICloseableIterator) { - ((ICloseableIterator)src1).close(); + ((ICloseableIterator<?>)src1).close(); } if(src2 instanceof ICloseableIterator) { - ((ICloseableIterator)src2).close(); + ((ICloseableIterator<?>)src2).close(); } } + @Override public boolean hasNext() { return tmp1 != null || tmp2 != null || src1.hasNext() @@ -127,6 +128,7 @@ private E tmp1; private E tmp2; + @Override public E next() { if (!hasNext()) @@ -196,6 +198,7 @@ } + @Override public void remove() { throw new UnsupportedOperationException(); @@ -206,6 +209,7 @@ * The next chunk of elements in whatever order they were visited by * {@link #next()}. */ + @Override @SuppressWarnings("unchecked") public E[] nextChunk() { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/Striterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/Striterator.java 2013-08-19 12:58:17 UTC (rev 7298) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/striterator/Striterator.java 2013-08-20 18:26:17 UTC (rev 7299) @@ -106,6 +106,7 @@ } + @Override @SuppressWarnings("unchecked") public IStriterator<I, E> addFilter(final IFilter<I, ?, E> filter) { @@ -115,6 +116,7 @@ } + @Override public IStriterator<I, E> addInstanceOfFilter(final Class<E> cls) { return addFilter(new Filter<I, E>(cls) { @@ -132,7 +134,8 @@ } - public IStriterator<I, E> append(I src) { + @Override + public IStriterator<I, E> append(final I src) { return addFilter(new Appender<I, E>(src)); @@ -148,38 +151,42 @@ */ public IStriterator<I,E> exclude(Set<E> set) { // TODO Auto-generated method stub - return null; + throw new UnsupportedOperationException(); } // @todo use temporary store for scalable set to filter the instances. 
public IStriterator<I,E> makeUnique() { // TODO Auto-generated method stub - return null; + throw new UnsupportedOperationException(); } public IStriterator<I,E> map(Object client, Method method) { // TODO Auto-generated method stub - return null; + throw new UnsupportedOperationException(); } + @Override final public boolean hasMoreElements() { return src.hasNext(); } + @Override final public E nextElement() { return src.next(); } + @Override final public boolean hasNext() { return src.hasNext(); } + @Override final public E next() { return src.next(); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/striterator/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/striterator/TestAll.java 2013-08-19 12:58:17 UTC (rev 7298) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/striterator/TestAll.java 2013-08-20 18:26:17 UTC (rev 7299) @@ -61,7 +61,9 @@ final TestSuite suite = new TestSuite("striterators"); suite.addTestSuite(TestMergeFilter.class); - + + suite.addTestSuite(TestAppendFilter.class); + suite.addTestSuite(TestPushbackIterator.class); suite.addTestSuite(TestResolver.class); Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/striterator/TestAppendFilter.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/striterator/TestAppendFilter.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/striterator/TestAppendFilter.java 2013-08-20 18:26:17 UTC (rev 7299) @@ -0,0 +1,73 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Aug 7, 2008 + */ + +package com.bigdata.striterator; + +import java.util.Arrays; + +import junit.framework.TestCase2; + +/** + * Unit tests for {@link Appender}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class TestAppendFilter extends TestCase2 { + + /** + * + */ + public TestAppendFilter() { + + } + + /** + * @param arg0 + */ + public TestAppendFilter(String arg0) { + super(arg0); + + } + + public void test_filter() { + + final IChunkedIterator<Long> actual = (IChunkedIterator<Long>) new ChunkedStriterator<IChunkedIterator<Long>, Long>( + Arrays.asList(new Long[] { 1L, 3L, 5L }).iterator()) + .addFilter(new Appender<IChunkedIterator<Long>, Long>( + new ChunkedWrappedIterator<Long>(Arrays.asList( + new Long[] { 2L, 3L, 4L }).iterator()))); + + assertEquals(new Long[] { // + 1L, 3L, 5L, // src1 + 2L, 3L, 4L, // src2 + }, actual.nextChunk()); + + } + +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
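The heart of the r7299 fix is twofold: the appending iterator must fall through from the first source to the second, and close() must propagate to both sources. A self-contained sketch of that pattern, using plain java.util with AutoCloseable standing in for bigdata's ICloseableIterator (this is an illustration, not the Appender class itself):

{{{
import java.util.Iterator;
import java.util.NoSuchElementException;

class AppendingIter<E> implements Iterator<E>, AutoCloseable {

    private final Iterator<E> src1, src2;
    private Iterator<E> src; // null once exhausted or closed.

    AppendingIter(final Iterator<E> src1, final Iterator<E> src2) {
        if (src1 == null || src2 == null)
            throw new IllegalArgumentException();
        this.src1 = src1;
        this.src2 = src2;
        this.src = src1;
    }

    @Override
    public boolean hasNext() {
        if (src == null)
            return false; // exhausted.
        if (src.hasNext())
            return true;
        if (src == src1) {
            src = src2; // start on the 2nd source.
            return hasNext();
        }
        src = null; // both sources exhausted.
        return false;
    }

    @Override
    public E next() {
        if (!hasNext())
            throw new NoSuchElementException();
        return src.next();
    }

    @Override
    public void close() {
        src = null;
        // The omission fixed above: close() must reach BOTH sources.
        closeQuietly(src1);
        closeQuietly(src2);
    }

    private static void closeQuietly(final Object o) {
        if (o instanceof AutoCloseable) {
            try {
                ((AutoCloseable) o).close();
            } catch (Exception ignored) {
            }
        }
    }
}
}}}

Appending [2,3,4] after [1,3,5] visits 1,3,5,2,3,4 -- the same order asserted by the new TestAppendFilter.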
From: <mar...@us...> - 2013-08-19 12:58:27
Revision: 7298 http://bigdata.svn.sourceforge.net/bigdata/?rev=7298&view=rev Author: martyncutcher Date: 2013-08-19 12:58:17 +0000 (Mon, 19 Aug 2013) Log Message: ----------- Wrote testStartABC_userLevelAbortDoesNotCauseQuorumBreak to use IIndexManagerCallable to run custom code on the leader Modified Paths: -------------- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2013-08-16 12:18:24 UTC (rev 7297) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2013-08-19 12:58:17 UTC (rev 7298) @@ -27,6 +27,7 @@ package com.bigdata.journal.jini.ha; import java.io.IOException; +import java.io.Serializable; import java.lang.reflect.Method; import java.net.InetSocketAddress; import java.rmi.Remote; @@ -147,7 +148,7 @@ log.warn("THREAD DUMP\n" + sb.toString()); } - public interface IIndexManagerCallable<T> extends Callable<T>{ + public interface IIndexManagerCallable<T> extends Serializable, Callable<T> { /** * Invoked before the task is executed to provide a reference to the @@ -176,6 +177,36 @@ } + @SuppressWarnings("serial") + static public abstract class IndexManagerCallable<T> implements IIndexManagerCallable<T> { + private static final Logger log = Logger.getLogger(HAJournal.class); + + private transient IIndexManager indexManager; + + public IndexManagerCallable() { + + } + + public void setIndexManager(IIndexManager indexManager) { + this.indexManager = indexManager; + } + + /** + * Return the {@link IIndexManager}. + * + * @return The data service and never <code>null</code>. + * + * @throws IllegalStateException + * if {@link #setIndexManager(IIndexManager)} has not been invoked. + */ + public IIndexManager getIndexManager() { + if (indexManager == null) + throw new IllegalStateException(); + + return indexManager; + } + } + /** * A {@link Remote} interface for new methods published by the service. */ Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-08-16 12:18:24 UTC (rev 7297) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-08-19 12:58:17 UTC (rev 7298) @@ -264,6 +264,8 @@ * can not reliably predict whether or not the quorum was fully met at * the 2-phase commit. 
*/ + + } /** Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-08-16 12:18:24 UTC (rev 7297) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-08-19 12:58:17 UTC (rev 7298) @@ -27,9 +27,15 @@ package com.bigdata.journal.jini.ha; import java.util.UUID; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import org.openrdf.model.Resource; +import org.openrdf.model.URI; +import org.openrdf.model.impl.URIImpl; + import net.jini.config.Configuration; import com.bigdata.ha.HACommitGlue; @@ -39,12 +45,22 @@ import com.bigdata.ha.msg.IHA2PhasePrepareMessage; import com.bigdata.ha.msg.IHANotifyReleaseTimeRequest; import com.bigdata.journal.AbstractTask; +import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.ITx; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ABC; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.LargeLoadTask; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; +import com.bigdata.journal.jini.ha.HAJournalTest.IndexManagerCallable; import com.bigdata.journal.jini.ha.HAJournalTest.SpuriousTestException; import com.bigdata.quorum.zk.ZKQuorum; import com.bigdata.quorum.zk.ZKQuorumImpl; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; +import com.bigdata.rdf.sail.BigdataSailRepository; import com.bigdata.rdf.sail.webapp.client.HttpException; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BD; import com.bigdata.util.ClocksNotSynchronizedException; import com.bigdata.util.InnerCause; @@ -169,20 +185,90 @@ * break. It should simply discard the buffered write set for that * transactions. * - * TODO Currently, there is a single unisolated connection commit protocol. - * When we add concurrent unisolated writers, the user level transaction - * abort will just discard the buffered writes for a specific - * {@link AbstractTask}. - * * @throws Exception */ - public void testStartABC_userLevelAbortDoesNotCauseQuorumBreak() + @SuppressWarnings("serial") + public void testStartABC_userLevelAbortDoesNotCauseQuorumBreak() throws Exception { - fail("write test"); + final ABC abc = new ABC(false/*sequential*/); // simultaneous start. + final HAGlue serverA = abc.serverA, serverB = abc.serverB, serverC = abc.serverC; + + // Verify quorum is FULLY met. + final long token = awaitFullyMetQuorum(); + + final HAGlueTest leader = (HAGlueTest) quorum.getClient().getLeader(token); + + leader.log("Calling RemoteUserLevelAbortt"); + + final Future<Void> ft = leader.submit(new RemoteUserLevelAbort(), true); + + // Await await user abort task. + try { + ft.get(); + leader.log("DONE: RemoteUserLevelAbort"); + } catch (Exception e) { + fail("Unexpected exception", e); + } + + // quorum should remain met on original token + assertTrue(token == awaitFullyMetQuorum()); } + + /** + * RemoteUserLevel abort connects to Sail with an unisolated + * connection, loads some data then closes the connection resulting + * in a local abort on the leader. 
+ */ + @SuppressWarnings("serial") + static class RemoteUserLevelAbort extends IndexManagerCallable<Void> { + + @Override + public Void call() throws Exception { + + HAJournal journal = (HAJournal) getIndexManager(); + + + final AbstractTripleStore tripleStore = (AbstractTripleStore) journal + .getResourceLocator().locate("kb", ITx.UNISOLATED); + if (tripleStore == null) { + + throw new RuntimeException("Not found: namespace=kb"); + + } + + log.warn("Establishing BigdataSail connection"); + + final BigdataSail sail = new BigdataSail(tripleStore); + + final BigdataSailRepository repo = new BigdataSailRepository(sail); + + repo.initialize(); + + BigdataSailConnection conn = sail.getUnisolatedConnection(); + + final String ns = BD.NAMESPACE; + + final URI mike = new URIImpl(ns+"User"); + final URI bryan = new URIImpl(ns+"Level"); + final URI martyn = new URIImpl(ns+"Abort"); + + conn.addStatement(mike, bryan, martyn, new Resource[]{}); + conn.flush(); + + log.warn("Added data"); + + // A close should trigger an abort - confirmed in log + conn.close(); + + log.warn("Connection closed"); + + return null; + } + } + /** * This test forces clock skew on one of the followers causing it to * encounter an error in its GatherTask. This models the problem that was @@ -503,6 +589,8 @@ // Enforce the join order. final ABC startup = new ABC(true /*sequential*/); + HAJournalTest.dumpThreads(); + final long token = awaitFullyMetQuorum(); // Should be one commit point. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
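The mechanics that make this test possible are the interplay of Serializable and transient in the interface above: the task object is serialized and shipped over RMI to the remote service, while the IIndexManager reference is injected after it arrives and is deliberately transient so it never travels with the task. A minimal stand-alone sketch of that shape (Object stands in for IIndexManager so the example compiles on its own; the class name is hypothetical):

{{{
import java.io.Serializable;
import java.util.concurrent.Callable;

public abstract class ShippableTask<T> implements Callable<T>, Serializable {

    private static final long serialVersionUID = 1L;

    // transient: injected on the receiving side, never serialized.
    private transient Object indexManager;

    /** Invoked by the receiving service before the task executes. */
    public void setIndexManager(final Object indexManager) {
        this.indexManager = indexManager;
    }

    /** @throws IllegalStateException if the reference was never injected. */
    protected Object getIndexManager() {
        if (indexManager == null)
            throw new IllegalStateException();
        return indexManager;
    }
}
}}}

RemoteUserLevelAbort above follows exactly this shape: it extends the abstract base class, and its call() obtains the journal via getIndexManager() once it is running on the leader.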
From: <tho...@us...> - 2013-08-16 12:18:32
Revision: 7297 http://bigdata.svn.sourceforge.net/bigdata/?rev=7297&view=rev Author: thompsonbry Date: 2013-08-16 12:18:24 +0000 (Fri, 16 Aug 2013) Log Message: ----------- Added a method to HAGlueTest that allows the test case to submit a Callable that will execute on the remote server. The test may obtain either a thick future (in which case the comptuation will be done before the Future is returned) or an async Future (in which case the computation might or might not be done before the Future is returned). Note: The Callable MUST implement Serializable! Modified Paths: -------------- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2013-08-15 23:03:34 UTC (rev 7296) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2013-08-16 12:18:24 UTC (rev 7297) @@ -35,6 +35,7 @@ import java.util.Map; import java.util.UUID; import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; @@ -83,6 +84,7 @@ import com.bigdata.ha.msg.IHAWriteSetStateRequest; import com.bigdata.ha.msg.IHAWriteSetStateResponse; import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IIndexManager; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.jini.ha.HAJournalServer.HAQuorumService; import com.bigdata.quorum.AsynchronousQuorumCloseException; @@ -115,7 +117,65 @@ return new HAGlueTestImpl(serviceId); } + + /** + * Utility accessible for HAGlueTest methods and public static for + * test purposes. + * + * TODO This should use a one-up counter within the service and write + * the thread dump into a file named by that counter. It shoudl include + * the datetime stamp in the file contents. It should log @ ERROR that + * a thread dump was written and include the name of the file. + */ + static public void dumpThreads() { + final StringBuilder sb = new StringBuilder(); + + final Map<Thread, StackTraceElement[]> dump = Thread + .getAllStackTraces(); + + for (Map.Entry<Thread, StackTraceElement[]> threadEntry : dump + .entrySet()) { + final Thread thread = threadEntry.getKey(); + sb.append("THREAD#" + thread.getId() + ", " + thread.getName() + "\n"); + for (StackTraceElement elem : threadEntry.getValue()) { + sb.append("\t" + elem.toString() + "\n"); + } + + } + + log.warn("THREAD DUMP\n" + sb.toString()); + } + + public interface IIndexManagerCallable<T> extends Callable<T>{ + + /** + * Invoked before the task is executed to provide a reference to the + * {@link IIndexManager} on which it is executing. + * + * @param indexManager + * The index manager on the service. + * + * @throws IllegalArgumentException + * if the argument is <code>null</code> + * @throws IllegalStateException + * if {@link #setIndexManager(IIndexManager)} has already been + * invoked and was set with a different value. + */ + void setIndexManager(IIndexManager indexManager); + + /** + * Return the {@link IIndexManager}. + * + * @return The data service and never <code>null</code>. + * + * @throws IllegalStateException + * if {@link #setIndexManager(IIndexManager)} has not been invoked. 
+ */ + IIndexManager getIndexManager(); + + } + /** * A {@link Remote} interface for new methods published by the service. */ @@ -212,11 +272,31 @@ */ public void setNextTimestamp(long nextTimestamp) throws IOException; + /** + * Enable remote lpgging of JVM thread state. + */ + public void dumpThreads() throws IOException; + + /** + * Run the caller's task on the service. + * + * @param callable + * The task to run on the service. + * @param asyncFuture + * <code>true</code> if the task will execute asynchronously + * and return a {@link Future} for the computation that may + * be used to inspect and/or cancel the computation. + * <code>false</code> if the task will execute synchronously + * and return a thick {@link Future}. + */ + public <T> Future<T> submit(IIndexManagerCallable<T> callable, + boolean asyncFuture) throws IOException; + } /** * Identifies a method to be failed and tracks the #of invocations of that - * methosuper. + * method. */ private static class MethodData { @SuppressWarnings("unused") @@ -940,6 +1020,26 @@ } + @Override + public void dumpThreads() { + HAJournalTest.dumpThreads(); + } + + @Override + public <T> Future<T> submit(final IIndexManagerCallable<T> callable, + final boolean asyncFuture) throws IOException { + + callable.setIndexManager(getIndexManager()); + + final Future<T> ft = getIndexManager().getExecutorService().submit( + callable); + + return getProxy(ft, asyncFuture); + + } + + // @Override + } // class HAGlueTestImpl private static class MyPrepareMessage implements IHA2PhasePrepareMessage { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
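The asyncFuture flag in submit() distinguishes two styles of Future. With plain java.util.concurrent the difference looks like this (a sketch of the semantics only, not bigdata's getProxy() machinery):

{{{
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FutureStyles {

    /** Async style: return at once; the caller's get() may block later. */
    static <T> Future<T> asyncFuture(final ExecutorService exec,
            final Callable<T> task) {
        return exec.submit(task);
    }

    /**
     * "Thick" style: force the computation to finish first, then hand
     * back a Future that is already done (any error surfaces here).
     */
    static <T> Future<T> thickFuture(final ExecutorService exec,
            final Callable<T> task) throws InterruptedException,
            ExecutionException {
        final Future<T> ft = exec.submit(task);
        ft.get(); // blocks until the computation is complete.
        return ft;
    }

    public static void main(final String[] args) throws Exception {
        final ExecutorService exec = Executors.newSingleThreadExecutor();
        final Callable<String> task = new Callable<String>() {
            public String call() {
                return "done";
            }
        };
        System.out.println(thickFuture(exec, task).isDone()); // true
        System.out.println(asyncFuture(exec, task).get()); // "done"
        exec.shutdown();
    }
}
}}}

The practical difference over RMI: a thick future can be handed back by value because it is already complete, while an async future must stay connected to the remote computation so the caller can inspect or cancel it.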
From: <tho...@us...> - 2013-08-15 23:03:41
Revision: 7296 http://bigdata.svn.sourceforge.net/bigdata/?rev=7296&view=rev Author: thompsonbry Date: 2013-08-15 23:03:34 +0000 (Thu, 15 Aug 2013) Log Message: ----------- Fixed namespace of the ganglia properties. Modified Paths: -------------- branches/READ_CACHE2/src/resources/HAJournal/HAJournal.config Modified: branches/READ_CACHE2/src/resources/HAJournal/HAJournal.config =================================================================== --- branches/READ_CACHE2/src/resources/HAJournal/HAJournal.config 2013-08-15 23:01:33 UTC (rev 7295) +++ branches/READ_CACHE2/src/resources/HAJournal/HAJournal.config 2013-08-15 23:03:34 UTC (rev 7296) @@ -361,10 +361,10 @@ //new NV(Journal.Options.COLLECT_PLATFORM_STATISTICS,"true"), // off by default. // uses bigdata-ganglia module to report service metrics to ganglia. - //new NV(com.bigdata.journal.ganglia.GANGLIA_REPORT,"true"), off by default. + //new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), off by default. // uses bigdata-ganglia module to build internal model of cluster load. - //new NV(com.bigdata.journal.ganglia.GANGLIA_LISTEN,"true"), // off by default. + //new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), // off by default. }, bigdata.kb);
From: <tho...@us...> - 2013-08-15 23:01:41
Revision: 7295 http://bigdata.svn.sourceforge.net/bigdata/?rev=7295&view=rev Author: thompsonbry Date: 2013-08-15 23:01:33 +0000 (Thu, 15 Aug 2013) Log Message: ----------- Modified the HAJournal.config file to disable performance counter collection by default. We might need to enable the ganglia-listener and have either the ganglia-reporter or gmond running to support some transparent load balancing strategies for the HAJournalServer, but these performance monitoring capabilities do not need to be on by default and disabling them removes a dependency requirement for sysstat (pidstat, iostat) on linux platforms. Modified Paths: -------------- branches/READ_CACHE2/src/resources/HAJournal/HAJournal.config Modified: branches/READ_CACHE2/src/resources/HAJournal/HAJournal.config =================================================================== --- branches/READ_CACHE2/src/resources/HAJournal/HAJournal.config 2013-08-15 15:57:17 UTC (rev 7294) +++ branches/READ_CACHE2/src/resources/HAJournal/HAJournal.config 2013-08-15 23:01:33 UTC (rev 7295) @@ -346,12 +346,25 @@ new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), - /* Enable statistics collection and reporting. */ + /* Enable statistics collection and reporting. + * + * Note: Some of these options have external dependencies, such as + * sysstat (pidstat, iostat) for OS and process level performance + * counter or ganglia (to aggregate and view collected performance + * data). + */ - new NV(Journal.Options.COLLECT_QUEUE_STATISTICS,"true"), - new NV(Journal.Options.COLLECT_PLATFORM_STATISTICS,"true"), - new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), - //new NV(com.bigdata.journal.GangliaPlugIn.GANGLIA_LISTEN,"true"), + // performance counters for internal queues. + //new NV(Journal.Options.COLLECT_QUEUE_STATISTICS,"true"), // off by default. + + // platform and process performance counters (requires external s/w on some platforms) + //new NV(Journal.Options.COLLECT_PLATFORM_STATISTICS,"true"), // off by default. + + // uses bigdata-ganglia module to report service metrics to ganglia. + //new NV(com.bigdata.journal.ganglia.GANGLIA_REPORT,"true"), off by default. + + // uses bigdata-ganglia module to build internal model of cluster load. + //new NV(com.bigdata.journal.ganglia.GANGLIA_LISTEN,"true"), // off by default. }, bigdata.kb);
From: <tho...@us...> - 2013-08-15 15:57:30
Revision: 7294 http://bigdata.svn.sourceforge.net/bigdata/?rev=7294&view=rev Author: thompsonbry Date: 2013-08-15 15:57:17 +0000 (Thu, 15 Aug 2013) Log Message: ----------- A distributed deadlock has been identified whose root cause is a race condition between the transition of a service from RESYNC into a service that is joined with the met quorum and the GATHER in the 2-phase commit protocol. The HAJournalServer doCastLeadersVoteAndServiceJoin() method can deadlock with the GATHER protocol. The deadlock arises because the barrierLock is obtained by the GatherTask before it validates some pre-conditions that are required to participate in the GATHER. One additional wrinkle is that doCastLeadersVoteAndServiceJoin() calls setQuourmToken() *before* it calls leader.awaitServiceJoin(). This causes the service to become HAReady even though it does not yet have the correct releaseTime for the quorum leader. We have verified through testing that this call can be moved to after the release time is reported back from leader.awaitServiceJoin(). The critical section in runWithBarrierLock() now looks like this: {{{ txs.runWithBarrierLock(new Runnable() { public void run() { // Verify that the quorum is valid. getQuorum().assertQuorum(token); // Synchronous service join (blocks until success or // failure). getActor().serviceJoin(); // Verify that the quorum is valid. getQuorum().assertQuorum(token); // // Set the token on the journal. // journal.setQuorumToken(token); // // // Verify that the quorum is valid. // getQuorum().assertQuorum(token); /* * We need to block until the leader observes our service * join. We are blocking replicated writes. That prevents * the leader from initiating a 2-phase commit. By blocking * until our service join becomes visible to the leader, we * are able to ensure that we will participate in a 2-phase * commit where the leader might otherwise have failed to * observe that we are a joined service. * * This addresses a failure mode demonstrated by the test * suite where a service join during a series of short * transactions could fail. The failure mode was that the * newly joined follower was current on the write set and * had invoked serviceJoin(), but the leader did not include * it in the 2-phase commit because the service join event * had not been delivered from zk in time (visibility). * * Note: There is a gap between the GATHER and the PREPARE. * If this service joins with a met quorum after the GATHER * and before the PREPARE, then it MUST set the most recent * consensus release time from the leader on its local * journal. This ensures that the newly joined follower will * not allow a transaction start against a commit point that * was recycled by the leader. * * TODO The leader should use a real commit counter in its * response and the follower should verify that the commit * counter is consistent with its assumptions. */ final IHANotifyReleaseTimeResponse resp; try { resp = leader .awaitServiceJoin(new HAAwaitServiceJoinRequest( getServiceId(), Long.MAX_VALUE/* timeout */, TimeUnit.SECONDS/* unit */)); if (haLog.isInfoEnabled()) haLog.info("Obtained releaseTime from leader: " + resp); } catch (Exception t) { throw new QuorumException( "Service join not observed by leader.", t); } /* * Set the token on the journal. * * Note: We need to do this after the leader has observed * the service join. This is necessary in order to have the * precondition checks in the GatherTask correctly reject a * GatherTask when the service is not yet HAReady. 
If we set * the quorumToken *before* we await the visibility of the * service join on the leader, then the GatherTask will see * that the follower (this service) that is attempting to * join is HAReady and will incorrectly attempt to execute * the GatherTask, which can result in a deadlock. */ journal.setQuorumToken(token); // Verify that the quorum is valid. getQuorum().assertQuorum(token); // Update the release time on the local journal. txs.setReleaseTime(resp.getCommitTime()); // Verify that the quorum is valid. getQuorum().assertQuorum(token); } }); }}} Since a service that is in runWithBarrierLock() will not be HAReady, we can safely modify the GatherTask to verify the pre-conditions before taking the barrierLock. Note that the GatherTask runs on the "follower". In this case, the contention arises because the service is trying to join. The GatherTask now verifies pre-conditions before taking the barrierLock and also verifies them again after taking the barrierLock. The preconditions are all easy to check and testing them after we take the barrierLock could catch cases where a failure invalidates them before the service prepares and reports its vote for the release time consensus protocol. GatherTask.call in Journal.java {{{ public IHANotifyReleaseTimeResponse call() throws Exception { if (haLog.isInfoEnabled()) haLog.info("Running gather on follower"); /* * This variable is set in the try {} below. We eventually * respond either in the try{} or in the finally{}, depending on * whether or not the GatherTask encounters an error when it * executes. */ didNotifyLeader = false; try { /* * Test pre-conditions BEFORE getting the barrierLock. This * allows a service that is not yet properly joined to * refuse to do the GATHER before it obtains the barrierLock * that makes the GatherTask MUTEX with * doCastLeadersVoteAndServiceJoin(). */ preconditionTest(); barrierLock.lock(); // take lock on follower! try { // Re-test the pre-conditions. preconditionTest(); return doRunWithBarrierLock(); } finally { barrierLock.unlock(); } } catch (Throwable t) { log.error(t, t); if (!didNotifyLeader) { /** * Send mock response to the leader so it does not block * forever waiting for our response. The mock response * MUST include our correct serviceId. * * @see <href= * "https://sourceforge.net/apps/trac/bigdata/ticket/720" * > HA3 simultaneous service start failure </a> */ try { final IHANotifyReleaseTimeRequest resp = new HANotifyReleaseTimeRequest( serviceId, 0L/* pinnedCommitTime */, 0L/* pinnedCommitCounter */, nextTimestamp()/* timestamp */, true/* isMock */, req.getNewCommitCounter(), req.getNewCommitTime()); log.warn("Sending mock response for gather protocol: cause=" + t); // Will block until barrier breaks on leader. leader.notifyEarliestCommitTime(resp); } catch (Throwable t2) { log.error(t2, t2); } } /* * This exception will force PREPARE to fail on this service * when it checks the GatherTask's Future. */ throw new Exception(t); } } }}} Finally, the runWithBarrierLock() method in Journal.java was modified to provide error logging. This simplifies the code when that method is used by doCastLeadersVoteAndServiceJoin(). {{{ @Override public void runWithBarrierLock(final Runnable r) { barrierLock.lock(); try { haLog.info("Will run with barrier lock."); try { r.run(); } catch(Throwable t) { /* * Note: An Interrupt here is not really an ERROR. It * could be caused by a change in the RunState of the * HAJournalServer. 
*/ haLog.error(t, t); } finally { haLog.info("Did run with barrier lock."); } } finally { barrierLock.unlock(); } } }}} See #623 (HA TXS / TXS Bottleneck) Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-15 12:27:47 UTC (rev 7293) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-15 15:57:17 UTC (rev 7294) @@ -5528,7 +5528,7 @@ // Remote interface for the quorum leader. final HAGlue leader = localService.getLeader(newValue); - log.info("Fetching root block from leader."); + haLog.info("Fetching root block from leader."); final IRootBlockView leaderRB; try { leaderRB = leader @@ -5622,8 +5622,8 @@ * commits</a> */ - if (log.isInfoEnabled()) - log.info("Calling localAbort if NOT didJoinMetQuorum: " + if (haLog.isInfoEnabled()) + haLog.info("Calling localAbort if NOT didJoinMetQuorum: " + transitionState.didJoinMetQuorum); if (!transitionState.didJoinMetQuorum) { @@ -5676,7 +5676,7 @@ lock.unlock(); } - + if(haLog.isInfoEnabled()) haLog.info("done: token="+quorumToken+", HAReady="+haReadyToken+", HAStatus="+haStatus); } private final Condition haReadyCondition = _fieldReadWriteLock.writeLock().newCondition(); private volatile long haReadyToken = Quorum.NO_QUORUM; Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-15 12:27:47 UTC (rev 7293) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-15 15:57:17 UTC (rev 7294) @@ -753,8 +753,8 @@ * Thread (actually it now uses barrier.reset()). * * Note: CyclicBarrier.await(timeout,unit) causes the barrier to - * break if the timeout is exceeded. Therefore is CAN NOT be - * used in preference to this pattern. + * break if the timeout is exceeded (as opposed to simply throwing the TimeoutException and allowing the thread to + * retry the CyclicBarrier.await()). Therefore it CAN NOT be used in preference to this pattern. However, we could replace the use of the CyclicBarrier with a Phaser (JDK 1.7 or jr166). */ { // final Thread blockedAtBarrier = Thread.currentThread(); @@ -1183,6 +1183,13 @@ haLog.info("Will run with barrier lock."); try { r.run(); + } catch(Throwable t) { + /* + * Note: An Interrupt here is not really an ERROR. It + * could be caused by a change in the RunState of the + * HAJournalServer. + */ + haLog.error(t, t); } finally { haLog.info("Did run with barrier lock."); } @@ -1540,6 +1547,14 @@ private final UUID serviceId; private final IHAGatherReleaseTimeRequest req; + /** + * This variable is set in the try {} block in {@link #call()}. We + * eventually respond (sending an RMI to the leader) either in the + * try{} or in the finally{}, depending on whether or not the + * {@link GatherTask} encounters an error when it executes. 
+ */ + volatile private boolean didNotifyLeader = false; + public GatherTask(final HAGlue leader, final UUID serviceId, final IHAGatherReleaseTimeRequest req) { @@ -1569,8 +1584,8 @@ */ public IHANotifyReleaseTimeResponse call() throws Exception { - if (log.isInfoEnabled()) - log.info("Running gather on follower"); + if (haLog.isInfoEnabled()) + haLog.info("Running gather on follower"); /* * This variable is set in the try {} below. We eventually @@ -1578,150 +1593,34 @@ * whether or not the GatherTask encounters an error when it * executes. */ -// long now = 0L; - - boolean didNotifyLeader = false; - - barrierLock.lock(); // take lock on follower! + didNotifyLeader = false; try { - final long token = req.token(); - /* - * we do not need to handle the case where the token is - * invalid. The leader will reset() the CylicBarrier for - * this case. + * Test pre-conditions BEFORE getting the barrierLock. This + * allows a service that is not yet properly joined to + * refuse to do the GATHER before it obtains the barrierLock + * that makes the GatherTask MUTEX with + * doCastLeadersVoteAndServiceJoin(). */ + preconditionTest(); - // Verify quorum valid for token (implies leader valid) - getQuorum().assertQuorum(token); + barrierLock.lock(); // take lock on follower! - // Verify this service is HAReady for token. - assertHAReady(token); - - /* - * If the quorumService is null because this service is - * shutting down then the leader will notice the - * serviceLeave() and reset() the CyclicBarrier. - */ - final QuorumService<HAGlue> quorumService = getQuorum() - .getClient(); - -// /* -// * This timestamp is used to help detect clock skew. -// */ -// now = newConsensusProtocolTimestamp(); - - /* - * Note: At this point we have everything we need to form up - * our response. If we hit an assertion, we will still - * respond in the finally {} block below. - */ - - /* - * Note: This assert has been moved to the leader when it - * analyzes the messages from the followers. This allows us - * to report out the nature of the exception on the leader - * and thence back to the client. - */ -// /* Verify event on leader occurs before event on follower. -// */ -// assertBefore(req.getTimestampOnLeader(), now); - - if (!quorumService.isFollower(token)) - throw new QuorumException(); - - final long localCommitCounter = getRootBlockView() - .getCommitCounter(); - - if (req.getNewCommitCounter() != localCommitCounter + 1) { - throw new RuntimeException( - "leader is preparing for commitCounter=" - + req.getNewCommitCounter() - + ", but follower is at localCommitCounter=" - + localCommitCounter); - } - - final IHANotifyReleaseTimeRequest req2 = newHANotifyReleaseTimeRequest( - serviceId, req.getNewCommitCounter(), - req.getNewCommitTime()); - - /* - * RMI to leader. - * - * Note: Will block until barrier breaks on the leader. - */ - - didNotifyLeader = true; - - final IHANotifyReleaseTimeResponse consensusReleaseTime = leader - .notifyEarliestCommitTime(req2); - - /* - * Now spot check the earliest active tx on this follower. - * We want to make sure that this tx is not reading against - * a commit point whose state would be released by the new - * [consensusReleaseTime] that we just obtained from the - * leader. - * - * If everything is Ok, we update the releaseTime on the - * follower. - */ - - lock.lock(); - try { - if (log.isInfoEnabled()) - log.info("Validating consensus releaseTime on follower: consensus=" - + consensusReleaseTime); - - // the earliest active tx on this follower. 
- final TxState txState = getEarliestActiveTx(); + // Re-test the pre-conditions. + preconditionTest(); - // Consensus for new earliest visible commit time. - final long t2 = consensusReleaseTime.getCommitTime(); + return doRunWithBarrierLock(); - if (txState != null - && txState.getReadsOnCommitTime() < t2) { + } finally { - /* - * At least one transaction exists on the follower - * that is reading on a commit point LT the commit - * point which would be released. This is either a - * failure in the logic to compute the consensus - * releaseTime or a failure to exclude new - * transaction starts on the follower while - * computing the new consensus releaseTime. - */ + barrierLock.unlock(); - throw new AssertionError( - "The releaseTime consensus would release a commit point with active readers" - + ": consensus=" + consensusReleaseTime - + ", earliestActiveTx=" + txState); - - } - - final long newReleaseTime = Math.max(0L, - consensusReleaseTime.getCommitTime() - 1); - - if (log.isInfoEnabled()) - log.info("Advancing releaseTime on follower: " - + newReleaseTime); - - // Update the releaseTime on the follower - setReleaseTime(newReleaseTime); - - } finally { - - lock.unlock(); - } - // Done. - return consensusReleaseTime; - } catch (Throwable t) { log.error(t, t); @@ -1762,14 +1661,169 @@ */ throw new Exception(t); - } finally { + } + + } - barrierLock.unlock(); + /** + * Check various conditions that need to be true. + * <p> + * Note: We do this once before we take the barrier lock and once + * after. We need to do this before we take the barrier lock to + * avoid a distributed deadlock when a service is attempting to do + * runWithBarrierLock() to join concurrent with the GATHER of a + * 2-phase commit. We do it after we take the barrier lock to ensure + * that the conditions are still satisified - they are all light + * weight tests, but the conditions could become invalidated so it + * does not hurt to check again. + */ + private void preconditionTest() { + + final long token = req.token(); + /* + * we do not need to handle the case where the token is + * invalid. The leader will reset() the CylicBarrier for + * this case. + */ + + // Verify quorum valid for token (implies leader valid) + getQuorum().assertQuorum(token); + + // Verify this service is HAReady for token. + assertHAReady(token); + + /* + * If the quorumService is null because this service is + * shutting down then the leader will notice the + * serviceLeave() and reset() the CyclicBarrier. + */ + final QuorumService<HAGlue> quorumService = getQuorum() + .getClient(); + +// /* +// * This timestamp is used to help detect clock skew. +// */ +// now = newConsensusProtocolTimestamp(); + + /* + * Note: At this point we have everything we need to form up + * our response. If we hit an assertion, we will still + * respond in the finally {} block below. + */ + + /* + * Note: This assert has been moved to the leader when it + * analyzes the messages from the followers. This allows us + * to report out the nature of the exception on the leader + * and thence back to the client. + */ +// /* Verify event on leader occurs before event on follower. 
+// */ +// assertBefore(req.getTimestampOnLeader(), now); + + if (!quorumService.isFollower(token)) + throw new QuorumException(); + + final long localCommitCounter = getRootBlockView() + .getCommitCounter(); + + if (req.getNewCommitCounter() != localCommitCounter + 1) { + throw new RuntimeException( + "leader is preparing for commitCounter=" + + req.getNewCommitCounter() + + ", but follower is at localCommitCounter=" + + localCommitCounter); } - + } + + /** + * This code is MUTEX with runWithBarrierLock() in HAJournalServer's + * doCastLeadersVoteAndJoin(). + */ + private IHANotifyReleaseTimeResponse doRunWithBarrierLock() + throws Exception { + final IHANotifyReleaseTimeRequest req2 = newHANotifyReleaseTimeRequest( + serviceId, req.getNewCommitCounter(), + req.getNewCommitTime()); + + /* + * RMI to leader. + * + * Note: Will block until barrier breaks on the leader. + */ + + didNotifyLeader = true; + + final IHANotifyReleaseTimeResponse consensusReleaseTime = leader + .notifyEarliestCommitTime(req2); + + /* + * Now spot check the earliest active tx on this follower. We + * want to make sure that this tx is not reading against a + * commit point whose state would be released by the new + * [consensusReleaseTime] that we just obtained from the leader. + * + * If everything is Ok, we update the releaseTime on the + * follower. + */ + + lock.lock(); + + try { + + if (log.isInfoEnabled()) + log.info("Validating consensus releaseTime on follower: consensus=" + + consensusReleaseTime); + + // the earliest active tx on this follower. + final TxState txState = getEarliestActiveTx(); + + // Consensus for new earliest visible commit time. + final long t2 = consensusReleaseTime.getCommitTime(); + + if (txState != null && txState.getReadsOnCommitTime() < t2) { + + /* + * At least one transaction exists on the follower that + * is reading on a commit point LT the commit point + * which would be released. This is either a failure in + * the logic to compute the consensus releaseTime or a + * failure to exclude new transaction starts on the + * follower while computing the new consensus + * releaseTime. + */ + + throw new AssertionError( + "The releaseTime consensus would release a commit point with active readers" + + ": consensus=" + consensusReleaseTime + + ", earliestActiveTx=" + txState); + + } + + final long newReleaseTime = Math.max(0L, + consensusReleaseTime.getCommitTime() - 1); + + if (log.isInfoEnabled()) + log.info("Advancing releaseTime on follower: " + + newReleaseTime); + + // Update the releaseTime on the follower + setReleaseTime(newReleaseTime); + + } finally { + + lock.unlock(); + + } + + // Done. + return consensusReleaseTime; + + } // doRunWithBarrierLock + } // GatherTask /** Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-15 12:27:47 UTC (rev 7293) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-15 15:57:17 UTC (rev 7294) @@ -3564,83 +3564,99 @@ txs.runWithBarrierLock(new Runnable() { - public void run() { - - // Verify that the quorum is valid. - getQuorum().assertQuorum(token); + public void run() { - // Synchronous service join (blocks until success or failure). - getActor().serviceJoin(); + // Verify that the quorum is valid. + getQuorum().assertQuorum(token); - // Verify that the quorum is valid. 
- getQuorum().assertQuorum(token); + // Synchronous service join (blocks until success or + // failure). + getActor().serviceJoin(); - // Set the token on the journal. - journal.setQuorumToken(token); + // Verify that the quorum is valid. + getQuorum().assertQuorum(token); - // Verify that the quorum is valid. - getQuorum().assertQuorum(token); - - /* - * We need to block until the leader observes our - * service join. We are blocking replicated writes. - * That prevents the leader from initiating a - * 2-phase commit. By blocking until our service - * join becomes visible to the leader, we are able - * to ensure that we will participate in a 2-phase - * commit where the leader might otherwise have - * failed to observe that we are a joined service. - * - * This addresses a failure mode demonstrated by the - * test suite where a service join during a series - * of short transactions could fail. The failure - * mode was that the newly joined follower was - * current on the write set and had invoked - * serviceJoin(), but the leader did not include it - * in the 2-phase commit because the service join - * event had not been delivered from zk in time - * (visibility). - * - * Note: There is a gap between the GATHER and the - * PREPARE. If this service joins with a met quorum - * after the GATHER and before the PREPARE, then it - * MUST set the most recent consensus release time - * from the leader on its local journal. This - * ensures that the newly joined follower will not - * allow a transaction start against a commit point - * that was recycled by the leader. - * - * TODO The leader should use a real commit counter - * in its response and the follower should verify - * that the commit counter is consistent with its - * assumptions. - */ - try { - - final IHANotifyReleaseTimeResponse resp = leader.awaitServiceJoin(new HAAwaitServiceJoinRequest( +// // Set the token on the journal. +// journal.setQuorumToken(token); +// +// // Verify that the quorum is valid. +// getQuorum().assertQuorum(token); + + /* + * We need to block until the leader observes our service + * join. We are blocking replicated writes. That prevents + * the leader from initiating a 2-phase commit. By blocking + * until our service join becomes visible to the leader, we + * are able to ensure that we will participate in a 2-phase + * commit where the leader might otherwise have failed to + * observe that we are a joined service. + * + * This addresses a failure mode demonstrated by the test + * suite where a service join during a series of short + * transactions could fail. The failure mode was that the + * newly joined follower was current on the write set and + * had invoked serviceJoin(), but the leader did not include + * it in the 2-phase commit because the service join event + * had not been delivered from zk in time (visibility). + * + * Note: There is a gap between the GATHER and the PREPARE. + * If this service joins with a met quorum after the GATHER + * and before the PREPARE, then it MUST set the most recent + * consensus release time from the leader on its local + * journal. This ensures that the newly joined follower will + * not allow a transaction start against a commit point that + * was recycled by the leader. + * + * TODO The leader should use a real commit counter in its + * response and the follower should verify that the commit + * counter is consistent with its assumptions. 
+ */ + final IHANotifyReleaseTimeResponse resp; + try { + + resp = leader + .awaitServiceJoin(new HAAwaitServiceJoinRequest( getServiceId(), Long.MAX_VALUE/* timeout */, TimeUnit.SECONDS/* unit */)); - if (log.isInfoEnabled()) - log.info("Obtained releaseTime from leader: " - + resp); + if (haLog.isInfoEnabled()) + haLog.info("Obtained releaseTime from leader: " + + resp); - // Update the release time on the local journal. - txs.setReleaseTime(resp.getCommitTime()); + } catch (Exception t) { + throw new QuorumException( + "Service join not observed by leader.", t); + } - } catch (Throwable t) { - log.error(t, t); - throw new QuorumException( - "Service join not observed by leader.", - t); - } + /* + * Set the token on the journal. + * + * Note: We need to do this after the leader has observed + * the service join. This is necessary in order to have the + * precondition checks in the GatherTask correctly reject a + * GatherTask when the service is not yet HAReady. If we set + * the quorumToken *before* we await the visibility of the + * service join on the leader, then the GatherTask will see + * that the follower (this service) that is attempting to + * join is HAReady and will incorrectly attempt to execute + * the GatherTask, which can result in a deadlock. + */ - // Verify that the quorum is valid. - getQuorum().assertQuorum(token); - } - }); + journal.setQuorumToken(token); + // Verify that the quorum is valid. + getQuorum().assertQuorum(token); + + // Update the release time on the local journal. + txs.setReleaseTime(resp.getCommitTime()); + + // Verify that the quorum is valid. + getQuorum().assertQuorum(token); + + } + }); // runWithBarrierLock() + if (haLog.isInfoEnabled()) haLog.info("TRANSITION", new RuntimeException()); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
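Stripped of the HA specifics, the deadlock fix above follows a reusable pattern: validate cheap preconditions BEFORE taking the shared lock, so an ineligible participant never blocks the other critical section, then re-validate AFTER acquiring it, since the state may have changed while waiting. A distilled sketch (the Precondition interface and class name are hypothetical stand-ins for the HAReady/quorum checks):

{{{
import java.util.concurrent.locks.ReentrantLock;

public class GuardedCriticalSection {

    /** Stand-in for the quorum/HAReady checks; throws if not satisfied. */
    public interface Precondition {
        void check() throws IllegalStateException;
    }

    private final ReentrantLock barrierLock = new ReentrantLock();

    /**
     * Run work that must be MUTEX with another critical section, but only
     * if this participant is eligible to run it at all.
     */
    public void runIfEligible(final Precondition pre, final Runnable work) {

        // 1. Test BEFORE taking the lock: an ineligible caller refuses
        // early instead of blocking the other side (the deadlock above).
        pre.check();

        barrierLock.lock();
        try {

            // 2. Re-test under the lock: the checks are cheap, and the
            // state may have been invalidated while we waited.
            pre.check();

            work.run();

        } finally {
            barrierLock.unlock();
        }
    }
}
}}}

This is exactly the structure of the new GatherTask.call(): preconditionTest() runs once outside the barrierLock and once again inside it, before doRunWithBarrierLock() performs the actual GATHER.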
From: <tho...@us...> - 2013-08-15 12:27:57
Revision: 7293 http://bigdata.svn.sourceforge.net/bigdata/?rev=7293&view=rev Author: thompsonbry Date: 2013-08-15 12:27:47 +0000 (Thu, 15 Aug 2013) Log Message: ----------- Diagnosed an error with Martyn where the initial KB create failed. The root cause was a failure of the BigdataRDFServletContext to wait until the leader was HAReady before attempting to create the KB. The request to create the KB was concurrent with an abort() that was done by the leader as part of its transition into the HAReady(Leader) state. The fix was made in CreateKBTask. See #720 (HA3 simultaneous start) Modified Paths: -------------- branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/CreateKBTask.java Modified: branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/CreateKBTask.java =================================================================== --- branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/CreateKBTask.java 2013-08-14 18:09:08 UTC (rev 7292) +++ branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/CreateKBTask.java 2013-08-15 12:27:47 UTC (rev 7293) @@ -28,6 +28,8 @@ import java.util.Properties; import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import org.apache.log4j.Logger; @@ -91,6 +93,10 @@ } + /** + * TODO This process is not robust if the leader is elected and becomes + * HAReady and then fails over before the KB is created. + */ private void doRun() { if (indexManager instanceof AbstractJournal) { @@ -115,6 +121,9 @@ } else { + /* + * Wait for a quorum meet. + */ final long token; try { long tmp = quorum.token(); @@ -131,6 +140,19 @@ throw new RuntimeException(e1); } + /* + * Now wait until the service is HAReady. + */ + try { + jnl.awaitHAReady(Long.MAX_VALUE, TimeUnit.NANOSECONDS); + } catch (AsynchronousQuorumCloseException e) { + throw new RuntimeException(e); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (TimeoutException e) { + throw new RuntimeException(e); + } + if (quorum.getMember().isLeader(token)) { isSoloOrLeader = true; } else {
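The ordering this fix enforces (quorum meet, then HAReady, then the first write) generalizes to any first-write-after-election situation. A small sketch using a CountDownLatch as a stand-in for the awaitHAReady() condition; ReadyGate and its method names are hypothetical:

{{{
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class ReadyGate {

    private final CountDownLatch haReady = new CountDownLatch(1);

    /** Signalled once the service transition into HAReady is complete. */
    public void onHAReady() {
        haReady.countDown();
    }

    /**
     * Block the first write until the service is ready. A quorum meet
     * alone is not enough: the leader may still be inside the abort()
     * it performs while transitioning to HAReady(Leader).
     */
    public void createKB(final Runnable doCreate)
            throws InterruptedException, TimeoutException {

        if (!haReady.await(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
            throw new TimeoutException();

        doCreate.run();
    }
}
}}}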
From: <tho...@us...> - 2013-08-14 18:09:28
Revision: 7292 http://bigdata.svn.sourceforge.net/bigdata/?rev=7292&view=rev Author: thompsonbry Date: 2013-08-14 18:09:08 +0000 (Wed, 14 Aug 2013) Log Message: ----------- Merging changes from branches/READ_CACHE2 back into branches/READ_CACHE. These changes are designed to improve the stability and failover characteristics of the HA replication cluster. Work continues on this under branches/READ_CACHE2. @ r7291. {{{ merge https://bigdata.svn.sourceforge.net/svnroot/bigdata/branches/READ_CACHE2 /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/disco G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/disco --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/jini/start/config/ServiceConfiguration.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/attr G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/attr --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java U 
/Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/util/config G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/util/config --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf/lubm G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf/lubm --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf/uniprot/src G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf/uniprot/src --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf/uniprot G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf/uniprot --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf/btc/src/resources G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf/btc/src/resources --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf/btc G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf/btc --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-perf --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/src/resources/bin/config G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/src/resources/bin/config --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/lib/jetty G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/lib/jetty --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/bop/joinGraph G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/bop/joinGraph --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/bop/util G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/bop/util --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/quorum/TestSingletonQuorumSemantics.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/jsr166 G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/jsr166 --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/util/httpd G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/util/httpd --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U 
/Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/quorum/QuorumStateChangeListener.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/quorum/QuorumTokenTransitions.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHA2PhasePrepareMessage.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HA2PhasePrepareMessage.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHANotifyReleaseTimeRequest.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumService.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogWriter.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/util G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/util --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/jsr166 G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/jsr166 --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/ClientIndexView.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java U 
/Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/IndexWriteTask.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/AbstractScaleOutClientIndexView.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/io/FileChannelUtility.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/htree/raba G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/htree/raba --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/journal/Journal.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/IBlockingBuffer.java A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/util/StackInfoReport.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/build.xml --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/osgi G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/osgi --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-compatibility G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-compatibility --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/ticket693.txt C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/property_paths.owl C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket693.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/bench G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/bench --- Merging r7214 through r7291 into 
/Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/bench G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/bench --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.srx C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.ttl C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.rq C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-02.rq U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestOptionals.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/internal G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/internal --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/relation G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/relation --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/changesets G 
/Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/changesets --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/error G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/error --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/store/TripleStoreUtility.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/DGExpander.java C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/internal G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/internal --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/relation G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/relation --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/util G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/util --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/samples G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/samples --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/LEGAL G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/LEGAL --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/lib G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/lib --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/test/it/unimi/dsi G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/test/it/unimi/dsi --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/test/it/unimi G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/test/it/unimi --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/test G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/test --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/java/it/unimi G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/java/it/unimi --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/java/it G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/java/it --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/java G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src/java --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils/src --- Merging r7214 through r7291 into 
/Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/dsi-utils --- Merging r7214 through r7291 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE G /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE Merge complete. ===== File Statistics: ===== Added: 2 Updated: 59 ==== Property Statistics: ===== Merged: 50 ==== Conflict Statistics: ===== File conflicts: 1 Tree conflicts: 11 }}} The one file conflict reported in the summary had identical changes in both branches (AbstractHAJournalServerTestCase). There were a number of file conflicts NOT reported in the summary. These all had no changes when comparing the two files. I do not know why the conflict was reported. See #530 (Journal HA) See #720 (HA3 simultaneous start failures) Revision Links: -------------- http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev 
http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev 
http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumService.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogWriter.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HA2PhasePrepareMessage.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHA2PhasePrepareMessage.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHANotifyReleaseTimeRequest.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/FileChannelUtility.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Journal.java branches/READ_CACHE/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/READ_CACHE/bigdata/src/java/com/bigdata/quorum/QuorumStateChangeListener.java branches/READ_CACHE/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java branches/READ_CACHE/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java branches/READ_CACHE/bigdata/src/test/com/bigdata/quorum/TestSingletonQuorumSemantics.java 
branches/READ_CACHE/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/jini/start/config/ServiceConfiguration.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java branches/READ_CACHE/build.xml Added Paths: ----------- branches/READ_CACHE/bigdata/src/java/com/bigdata/quorum/QuorumTokenTransitions.java branches/READ_CACHE/bigdata/src/java/com/bigdata/util/StackInfoReport.java Property Changed: ---------------- branches/READ_CACHE/ branches/READ_CACHE/bigdata/lib/jetty/ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate/ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util/ branches/READ_CACHE/bigdata/src/java/com/bigdata/htree/raba/ branches/READ_CACHE/bigdata/src/java/com/bigdata/jsr166/ branches/READ_CACHE/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/READ_CACHE/bigdata/src/test/com/bigdata/bop/util/ branches/READ_CACHE/bigdata/src/test/com/bigdata/jsr166/ branches/READ_CACHE/bigdata/src/test/com/bigdata/util/httpd/ branches/READ_CACHE/bigdata-compatibility/ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/attr/ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/disco/ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/util/config/ branches/READ_CACHE/bigdata-perf/ branches/READ_CACHE/bigdata-perf/btc/ branches/READ_CACHE/bigdata-perf/btc/src/resources/ branches/READ_CACHE/bigdata-perf/lubm/ branches/READ_CACHE/bigdata-perf/uniprot/ branches/READ_CACHE/bigdata-perf/uniprot/src/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/internal/ 
branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/READ_CACHE/bigdata-rdf/src/samples/ branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/READ_CACHE/dsi-utils/ branches/READ_CACHE/dsi-utils/LEGAL/ branches/READ_CACHE/dsi-utils/lib/ branches/READ_CACHE/dsi-utils/src/ branches/READ_CACHE/dsi-utils/src/java/ branches/READ_CACHE/dsi-utils/src/java/it/ branches/READ_CACHE/dsi-utils/src/java/it/unimi/ branches/READ_CACHE/dsi-utils/src/test/ branches/READ_CACHE/dsi-utils/src/test/it/unimi/ branches/READ_CACHE/dsi-utils/src/test/it/unimi/dsi/ branches/READ_CACHE/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/READ_CACHE/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/READ_CACHE/osgi/ branches/READ_CACHE/src/resources/bin/config/ Property changes on: branches/READ_CACHE ___________________________________________________________________ Modified: svn:ignore - ant-build src bin bigdata*.jar ant-release standalone test* countersfinal.xml events.jnl .settings *.jnl TestInsertRate.out SYSTAP-BBT-result.txt U10load+query *.hprof com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv commit-log.txt eventLog dist bigdata-test com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv DIST.bigdata-*.tgz REL.bigdata-*.tgz queryLog* queryRunState* sparql.txt benchmark + ant-build src bin bigdata*.jar ant-release standalone test* countersfinal.xml events.jnl .settings *.jnl TestInsertRate.out SYSTAP-BBT-result.txt U10load+query *.hprof com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv commit-log.txt eventLog dist bigdata-test com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv DIST.bigdata-*.tgz REL.bigdata-*.tgz queryLog* queryRunState* sparql.txt benchmark CI Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7270 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7270 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 
/branches/READ_CACHE2:7217-7291 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 Property changes on: branches/READ_CACHE/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7270 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7270 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE2/bigdata/lib/jetty:7217-7291 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/aggregate:7217-7291 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/joinGraph:7217-7291 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/util:7217-7291 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2013-08-14 16:40:31 UTC (rev 7291) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2013-08-14 18:09:08 UTC (rev 7292) @@ -326,7 +326,7 @@ } dce = tst; } else if (tst.getEventType() == QuorumStateChangeEventEnum.PIPELINE_ADD) { - add = tst; + add = tst; } else if (tst.getEventType() == QuorumStateChangeEventEnum.PIPELINE_REMOVE) { if (add != null) { if (log.isDebugEnabled()) { @@ -1300,7 +1300,7 @@ * attempting to robustly replicate a write into a single method. This * was done in order to concentrate any conditional logic and design * rationale into a single method. 
- * + * <p> * Note: IFF we allow non-leaders to replicate HALog messages then this * assert MUST be changed to verify that the quorum token remains valid * and that this service remains joined with the met quorum, i.e., @@ -1499,10 +1499,15 @@ try { - if (log.isTraceEnabled()) - log.trace("Leader will send: " + b.remaining() + if (log.isInfoEnabled() || retryCount > 0) { + final String msg2 = "Leader will send: " + b.remaining() + " bytes, retryCount=" + retryCount + ", req=" - + req + ", msg=" + msg); + + req + ", msg=" + msg; + if (retryCount > 0) + log.warn(msg2); + else + log.info(msg2); + } // retest while holding lock before sending the message. assertQuorumState(); @@ -2068,5 +2073,23 @@ } } + + /** + * Called from ErrorTask in HAJournalServer to ensure that events are + * processed before entering SeekConsensus. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/695"> + * HAJournalServer reports "follower" but is in SeekConsensus and is not + * participating in commits</a> + */ + public void processEvents() { + this.lock.lock(); + try { + innerEventHandler.dispatchEvents();// have lock, dispatch events. + } finally { + this.lock.unlock(); + } + + } } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumService.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumService.java 2013-08-14 16:40:31 UTC (rev 7291) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumService.java 2013-08-14 18:09:08 UTC (rev 7292) @@ -124,21 +124,15 @@ final IRootBlockView rootBlock1); /** - * Callback method. - * - * @param token - * The token on which the service joined a met quorum. - * @param commitCounter - * The commitCounter for the local service. - * @param isLeader - * <code>true</code> iff the local service is the quorum leader. + * Enter an error state. The error state should take whatever corrective + * actions are necessary in order to prepare the service for continued + * operations. */ - /* - * I added this in but wound up not needed it. Do not use without good - * justification. - */ - @Deprecated - void didMeet(final long token, final long commitCounter, - final boolean isLeader); + void enterErrorState(); + /** + * Discard all state associated with the current write set. + */ + void discardWriteSet(); + } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2013-08-14 16:40:31 UTC (rev 7291) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2013-08-14 18:09:08 UTC (rev 7292) @@ -372,5 +372,19 @@ return readImpl.readFromQuorum(storeId, addr); } + + /** + * Called from ErrorTask in HAJournalServer to ensure that events are + * processed before entering SeekConsensus. 
+ * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/695"> + * HAJournalServer reports "follower" but is in SeekConsensus and is not + * participating in commits</a> + */ + protected void processEvents() { + pipelineImpl.processEvents(); + + } + } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-08-14 16:40:31 UTC (rev 7291) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-08-14 18:09:08 UTC (rev 7292) @@ -47,6 +47,7 @@ import com.bigdata.journal.RootBlockView; import com.bigdata.journal.StoreTypeEnum; import com.bigdata.rawstore.Bytes; +import com.bigdata.util.StackInfoReport; /** * Wrapper class to handle process log creation and output for HA. @@ -260,7 +261,7 @@ throw new IllegalStateException(); if (haLog.isInfoEnabled()) - haLog.info("rootBlock=" + rootBlock); + haLog.info("rootBlock=" + rootBlock, new StackInfoReport()); m_rootBlock = rootBlock; @@ -603,11 +604,15 @@ } - /** - * On various error conditions we may need to remove the log - * - * @throws IOException - */ + /** + * On various error conditions we may need to remove the log + * + * @throws IOException + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/695"> + * HAJournalServer reports "follower" but is in SeekConsensus and is + * not participating in commits</a> + */ private void remove() throws IOException { final Lock lock = m_stateLock.writeLock(); @@ -620,8 +625,18 @@ * Conditional remove iff file is open. Will not remove * something that has been closed. */ + final boolean isCommitted = m_state.isCommitted(); + + if (haLog.isInfoEnabled()) + haLog.info("Will close: " + m_state.m_haLogFile + ", committed: " + isCommitted); + m_state.forceCloseAll(); - + + if (isCommitted) return; // Do not remove a sealed HALog file! + + if (haLog.isInfoEnabled()) + haLog.info("Will remove: " + m_state.m_haLogFile, new StackInfoReport()); + if (m_state.m_haLogFile.exists() && !m_state.m_haLogFile.delete()) { /* @@ -646,9 +661,7 @@ } - /** - * Disable (and remove) the current log file if one is open. - */ + @Override public void disableHALog() throws IOException { if (haLog.isInfoEnabled()) Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogWriter.java 2013-08-14 16:40:31 UTC (rev 7291) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogWriter.java 2013-08-14 18:09:08 UTC (rev 7292) @@ -88,7 +88,13 @@ public void closeHALog(IRootBlockView rootBlock) throws IOException; /** - * Disable (and remove) the current log file if one is open. + * Disable (and remove) the current log file if one is open (an HALog file + * which has been committed by applying its closing root block is NOT + * removed).
+ * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/695"> + * HAJournalServer reports "follower" but is in SeekConsensus and is + * not participating in commits</a> */ public void disableHALog() throws IOException; Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HA2PhasePrepareMessage.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HA2PhasePrepareMessage.java 2013-08-14 16:40:31 UTC (rev 7291) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HA2PhasePrepareMessage.java 2013-08-14 18:09:08 UTC (rev 7292) @@ -130,7 +130,19 @@ return unit; } + /** + * {@inheritDoc} + * <p> + * Returns <code>false</code> by default + */ @Override + public boolean voteNo() { + + return false; + + } + + @Override public String toString() { return super.toString()+"{"// +"consensusReleaseTime="+getConsensusReleaseTime()// Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java 2013-08-14 16:40:31 UTC (rev 7291) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java 2013-08-14 18:09:08 UTC (rev 7292) @@ -23,6 +23,8 @@ */ package com.bigdata.ha.msg; +import java.util.UUID; + public class HAGatherReleaseTimeRequest implements IHAGatherReleaseTimeRequest { @@ -33,17 +35,28 @@ private final long token; private final long timestampOnLeader; + private final UUID leaderId; + private final long newCommitCounter; + private final long newCommitTime; public HAGatherReleaseTimeRequest(final long token, - final long timestampOnLeader) { + final long timestampOnLeader, final UUID leaderId, + final long newCommitCounter, final long newCommitTime) { + if (leaderId == null) + throw new IllegalArgumentException(); this.token = token; this.timestampOnLeader = timestampOnLeader; + this.leaderId = leaderId; + this.newCommitCounter = newCommitCounter; + this.newCommitTime = newCommitTime; } @Override public String toString() { return super.toString() + "{token=" + token + ",timestampOnLeader=" - + timestampOnLeader + "}"; + + timestampOnLeader + ", leaderId=" + leaderId + + ", newCommitCounter=" + newCommitCounter + ", newCommitTime=" + + newCommitTime + "}"; } @Override @@ -55,5 +68,20 @@ public long getTimestampOnLeader() { return timestampOnLeader; } + + @Override + public UUID getLeaderId() { + return leaderId; + } + @Override + public long getNewCommitCounter() { + return newCommitCounter; + } + + @Override + public long getNewCommitTime() { + return newCommitTime; + } + } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java 2013-08-14 16:40:31 UTC (rev 7291) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java 2013-08-14 18:09:08 UTC (rev 7292) @@ -36,14 +36,27 @@ private final long pinnedCommitTime; private final long pinnedCommitCounter; private final long timestamp; + private final boolean isMock; + private final long newCommitCounter; + private final long newCommitTime; public HANotifyReleaseTimeRequest(final UUID serviceUUID, final long pinnedCommitTime, final long pinnedCommitCounter, - final long timestamp) { + final 
long timestamp, final boolean isMock, + final long newCommitCounter, final long newCommitTime) { + if (serviceUUID == null) + throw new IllegalArgumentException(); + if (pinnedCommitTime < 0) + throw new IllegalArgumentException(); + if (pinnedCommitCounter < 0) + throw new IllegalArgumentException(); this.serviceUUID = serviceUUID; this.pinnedCommitTime = pinnedCommitTime; this.pinnedCommitCounter = pinnedCommitCounter; this.timestamp = timestamp; + this.isMock = isMock; + this.newCommitCounter = newCommitCounter; + this.newCommitTime = newCommitTime; } @Override @@ -51,7 +64,7 @@ return super.toString() + "{serviceUUID=" + serviceUUID + ",pinnedCommitTime=" + pinnedCommitTime + ",pinnedCommitCounter=" + pinnedCommitCounter + ",timestamp=" - + timestamp + "}"; + + timestamp + ", isMock=" + isMock + "}"; } @Override @@ -74,4 +87,19 @@ return timestamp; } + @Override + public boolean isMock() { + return isMock; + } + + @Override + public long getNewCommitCounter() { + return newCommitCounter; + } + + @Override + public long getNewCommitTime() { + return newCommitTime; + } + } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHA2PhasePrepareMessage.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHA2PhasePrepareMessage.java 2013-08-14 16:40:31 UTC (rev 7291) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHA2PhasePrepareMessage.java 2013-08-14 18:09:08 UTC (rev 7292) @@ -88,4 +88,10 @@ * The unit for the timeout. */ TimeUnit getUnit(); + + /** + * When <code>true</code>, always vote no. + */ + boolean voteNo(); + } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAGather... [truncated message content] |
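Of the merged changes above, the new processEvents() hook on QuorumPipelineImpl (dispatch queued quorum events while holding the pipeline lock, before entering SeekConsensus) is the clearest new idiom. A generic, self-contained sketch of that drain-under-lock pattern follows; the plain Runnable queue here stands in for the actual innerEventHandler and is illustrative only:

{{{
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.locks.ReentrantLock;

public class EventDrain {

    private final ReentrantLock lock = new ReentrantLock();

    /** Hypothetical queue of pending quorum state-change events. */
    private final Deque<Runnable> pending = new ArrayDeque<Runnable>();

    /** Queue an event for later dispatch. */
    public void enqueue(final Runnable event) {
        lock.lock();
        try {
            pending.add(event);
        } finally {
            lock.unlock();
        }
    }

    /**
     * Drain all queued events before a state transition (e.g., before
     * SeekConsensus), mirroring processEvents() in the diff above.
     */
    public void processEvents() {
        lock.lock();
        try {
            Runnable event;
            while ((event = pending.poll()) != null) {
                event.run(); // dispatch while still holding the lock
            }
        } finally {
            lock.unlock();
        }
    }
}
}}}

Draining under the same lock that guards enqueue() ensures the error-handling path observes every event queued before the transition, which is exactly the guarantee ticket #695 needed.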
From: <tho...@us...> - 2013-08-14 16:40:48
Revision: 7291 http://bigdata.svn.sourceforge.net/bigdata/?rev=7291&view=rev Author: thompsonbry Date: 2013-08-14 16:40:31 +0000 (Wed, 14 Aug 2013) Log Message: ----------- Modified the HA test suite to ALWAYS use the HAJournalTest class by specifying that class in the HAJournal-{A,B,C}.config files. This is in preparation for Martyn adding a method to write a thread dump into the log on the HAGlueTest API and then modifying the test harness to request those thread dumps on the running service(s) when a test would fail due to a timeout. This will help us to look for the root causes of timeouts in distributed deadlocks. See #723 (HA asynchronous tasks must be canceled when invariants are changed) Modified Paths: -------------- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2013-08-14 16:04:43 UTC (rev 7290) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2013-08-14 16:40:31 UTC (rev 7291) @@ -269,6 +269,10 @@ replicationFactor = bigdata.replicationFactor; + // Use the overridden version of the HAJournal by default so we get the + // HAGlueTest API for every test. + HAJournalClass = "com.bigdata.journal.jini.ha.HAJournalTest"; + } /* Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2013-08-14 16:04:43 UTC (rev 7290) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2013-08-14 16:40:31 UTC (rev 7291) @@ -268,6 +268,10 @@ replicationFactor = bigdata.replicationFactor; + // Use the overridden version of the HAJournal by default so we get the + // HAGlueTest API for every test. + HAJournalClass = "com.bigdata.journal.jini.ha.HAJournalTest"; + } /* Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2013-08-14 16:04:43 UTC (rev 7290) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2013-08-14 16:40:31 UTC (rev 7291) @@ -268,6 +268,10 @@ replicationFactor = bigdata.replicationFactor; + // Use the overridden version of the HAJournal by default so we get the + // HAGlueTest API for every test. 
+ HAJournalClass = "com.bigdata.journal.jini.ha.HAJournalTest"; + } /* Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java 2013-08-14 16:04:43 UTC (rev 7290) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java 2013-08-14 16:40:31 UTC (rev 7291) @@ -62,7 +62,7 @@ // "com.bigdata.journal.HAJournal.properties=" +TestHA3JournalServer.getTestHAJournalProperties(com.bigdata.journal.HAJournal.properties), "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", - "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"", +// "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"", "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", }; Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java 2013-08-14 16:04:43 UTC (rev 7290) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java 2013-08-14 16:40:31 UTC (rev 7291) @@ -67,7 +67,7 @@ return new String[]{ "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", - "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"" +// "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"" }; } Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-08-14 16:04:43 UTC (rev 7290) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-08-14 16:40:31 UTC (rev 7291) @@ -96,7 +96,7 @@ // "com.bigdata.journal.HAJournal.properties=" +TestHA3JournalServer.getTestHAJournalProperties(com.bigdata.journal.HAJournal.properties), "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", - "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"", +// "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"", "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", }; Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 
2013-08-14 16:04:43 UTC (rev 7290) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-08-14 16:40:31 UTC (rev 7291) @@ -76,7 +76,7 @@ return new String[]{ "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", - "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"" +// "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"" }; } |
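For readers unfamiliar with the HAJournalClass entry being commented out above: the test configuration names a journal implementation class (HAJournalTest), which the server presumably instantiates by reflection, and commenting the entry out falls back to the production class. The following is a minimal, standalone sketch of that configuration pattern, not the bigdata code; the default class name here is a stand-in so the sketch runs on its own.

{{{
import java.lang.reflect.Constructor;

/**
 * Standalone sketch of a class-name override read from configuration.
 * Hypothetical stand-in: java.util.ArrayList is used as the default so
 * the sketch is runnable anywhere.
 */
public class JournalClassOverrideSketch {

    public static void main(String[] args) throws Exception {

        // The config entry supplies a fully qualified class name.
        final String className = args.length > 0 ? args[0]
                : "java.util.ArrayList";

        // Resolve and instantiate it reflectively.
        final Class<?> cls = Class.forName(className);
        final Constructor<?> ctor = cls.getConstructor();

        System.out.println("instantiated: "
                + ctor.newInstance().getClass().getName());
    }
}
}}}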
From: <tho...@us...> - 2013-08-14 16:04:53
|
Revision: 7290 http://bigdata.svn.sourceforge.net/bigdata/?rev=7290&view=rev Author: thompsonbry Date: 2013-08-14 16:04:43 +0000 (Wed, 14 Aug 2013) Log Message: ----------- - done: Tag the Gather messages with the commitCounter and a one up gather attempt number (or ts on the leadersValue) and verify that the gather task in prepare2Phase() was the RIGHT gather task. [The newCommitCounter and newCommitTime are now passed into the GATHER protocol. If the follower is not at the expected commit counter (newCommitCounter-1) then the GatherTask will fail. Also, if the leader receives a response from a GatherTask that is for the wrong newCommitCounter or newCommitTime then the GATHER will fail.] - done: We were failing to check the Future of the RMI requests to start a GatherTask on the follower. This led to a deadlock in one of the testStartAB_C_MultiTransactionResync_0tx_then_500ms_delay test runs. This was fixed by checking the Future for the RMI in the monitoring Runnable. If any RMI fails, then the GATHER is cancelled. See #720 (HA3 simultaneous starts) Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHANotifyReleaseTimeRequest.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java 2013-08-14 14:34:48 UTC (rev 7289) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java 2013-08-14 16:04:43 UTC (rev 7290) @@ -36,20 +36,27 @@ private final long token; private final long timestampOnLeader; private final UUID leaderId; + private final long newCommitCounter; + private final long newCommitTime; public HAGatherReleaseTimeRequest(final long token, - final long timestampOnLeader, final UUID leaderId) { + final long timestampOnLeader, final UUID leaderId, + final long newCommitCounter, final long newCommitTime) { if (leaderId == null) throw new IllegalArgumentException(); this.token = token; this.timestampOnLeader = timestampOnLeader; this.leaderId = leaderId; + this.newCommitCounter = newCommitCounter; + this.newCommitTime = newCommitTime; } @Override public String toString() { return super.toString() + "{token=" + token + ",timestampOnLeader=" - + timestampOnLeader + ", leaderId=" + leaderId + "}"; + + timestampOnLeader + ", leaderId=" + leaderId + + ", newCommitCounter=" + newCommitCounter + ", newCommitTime=" + + newCommitTime + "}"; } @Override @@ -67,4 +74,14 @@ return leaderId; } + @Override + public long getNewCommitCounter() { + return newCommitCounter; + } + + @Override + public long getNewCommitTime() { + return newCommitTime; + } + } Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java 2013-08-14
14:34:48 UTC (rev 7289) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java 2013-08-14 16:04:43 UTC (rev 7290) @@ -37,10 +37,13 @@ private final long pinnedCommitCounter; private final long timestamp; private final boolean isMock; + private final long newCommitCounter; + private final long newCommitTime; public HANotifyReleaseTimeRequest(final UUID serviceUUID, final long pinnedCommitTime, final long pinnedCommitCounter, - final long timestamp, final boolean isMock) { + final long timestamp, final boolean isMock, + final long newCommitCounter, final long newCommitTime) { if (serviceUUID == null) throw new IllegalArgumentException(); if (pinnedCommitTime < 0) @@ -52,6 +55,8 @@ this.pinnedCommitCounter = pinnedCommitCounter; this.timestamp = timestamp; this.isMock = isMock; + this.newCommitCounter = newCommitCounter; + this.newCommitTime = newCommitTime; } @Override @@ -87,4 +92,14 @@ return isMock; } + @Override + public long getNewCommitCounter() { + return newCommitCounter; + } + + @Override + public long getNewCommitTime() { + return newCommitTime; + } + } Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java 2013-08-14 14:34:48 UTC (rev 7289) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java 2013-08-14 16:04:43 UTC (rev 7290) @@ -56,5 +56,15 @@ * is (or was) the leader even if the token has been invalidated. */ public UUID getLeaderId(); + + /** + * The commit counter that will be assigned to the new commit point. + */ + public long getNewCommitCounter(); + + /** + * The commit time that will be assigned to the new commit point. + */ + public long getNewCommitTime(); } Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHANotifyReleaseTimeRequest.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHANotifyReleaseTimeRequest.java 2013-08-14 14:34:48 UTC (rev 7289) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHANotifyReleaseTimeRequest.java 2013-08-14 16:04:43 UTC (rev 7290) @@ -84,5 +84,17 @@ * simultaneous service start failure </a> */ public boolean isMock(); + + /** + * The commit counter that will be assigned to the new commit point (as + * specified by the leader). + */ + public long getNewCommitCounter(); + + /** + * The commit time that will be assigned to the new commit point (as + * specified by the leader). + */ + public long getNewCommitTime(); } Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-14 14:34:48 UTC (rev 7289) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-14 16:04:43 UTC (rev 7290) @@ -3076,6 +3076,10 @@ assertCommitTimeAdvances(commitTime); + final IRootBlockView old = _rootBlock; + + final long newCommitCounter = old.getCommitCounter() + 1; + /* * First, run each of the committers accumulating the updated root * addresses in an array. In general, these are btrees and they may @@ -3156,7 +3160,8 @@ // Run the GATHER protocol. 
consensusReleaseTime = ((AbstractHATransactionService) getLocalTransactionManager() .getTransactionService()) - .updateReleaseTimeConsensus( + .updateReleaseTimeConsensus(newCommitCounter, + commitTime, gatherJoinedAndNonJoinedServices.getJoinedServiceIds(), getHAReleaseTimeConsensusTimeout(), TimeUnit.MILLISECONDS); @@ -3210,10 +3215,6 @@ * but good root blocks can be found elsewhere in the file. */ - final IRootBlockView old = _rootBlock; - - final long newCommitCounter = old.getCommitCounter() + 1; - final ICommitRecord commitRecord = new CommitRecord(commitTime, newCommitCounter, rootAddrs); Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-14 14:34:48 UTC (rev 7289) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-14 16:04:43 UTC (rev 7290) @@ -34,8 +34,10 @@ import java.util.UUID; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; @@ -317,16 +319,24 @@ /** * The token that must remain valid. - * - * TODO HA TXS: We should also verify that the responses we collect are - * for the same request. This could be done using a request UUID or - * one-up request counter. That would guard against having a service - * reconnect and respond late once the leader had gotten to another - * commit point. */ final private long token; /** + * The commit counter that will be assigned to the commit point. This is + * used to ensure that the GATHER and PREPARE are for the same commit + * point and that the follower is at the previous commit point. + */ + final private long newCommitCounter; + + /** + * The commit time that will be assigned to the commit point. This is + * used to ensure that the GATHER and PREPARE are for the same commit + * point and that the follower is at the previous commit point. + */ + final private long newCommitTime; + + /** * Local HA service implementation (non-Remote). */ final private QuorumService<HAGlue> quorumService; @@ -442,10 +452,15 @@ // } /** The services joined with the met quorum, in their join order. */ - public BarrierState(final UUID[] joinedServiceIds) { + public BarrierState(final long newCommitCounter, + final long newCommitTime, final UUID[] joinedServiceIds) { token = getQuorum().token(); + this.newCommitCounter = newCommitCounter; + + this.newCommitTime = newCommitTime; + getQuorum().assertLeader(token); // Local HA service implementation (non-Remote). @@ -456,7 +471,8 @@ this.leaderId = quorumService.getServiceId(); leadersValue = ((InnerJournalTransactionService) getTransactionService()) - .newHANotifyReleaseTimeRequest(leaderId); + .newHANotifyReleaseTimeRequest(leaderId, newCommitCounter, + newCommitTime); // Note: Local method call. timestampOnLeader = leadersValue.getTimestamp(); @@ -568,6 +584,33 @@ } /** + * Task does an RMI to the follower to start the GatherTask on the + * follower. 
+ */ + private final class StartGatherOnFollowerTask implements Callable<Void> { + + private final UUID serviceId; + private final IHAGatherReleaseTimeRequest msg; + + public StartGatherOnFollowerTask(final UUID serviceId, + final IHAGatherReleaseTimeRequest msg) { + this.serviceId = serviceId; + this.msg = msg; + } + + public Void call() throws Exception { + // Resolve joined service. + final HATXSGlue service = getService(serviceId); + // Message remote service. + // Note: NPE if [service] is gone. + service.gatherMinimumVisibleCommitTime(msg); + // Done. + return null; + } + + } // class StartGatherOnFollowerTask + + /** * Send an {@link IHAGatherReleaseTimeRequest} message to each follower. * Block until the responses are received. * <p> @@ -624,8 +667,9 @@ try { final IHAGatherReleaseTimeRequest msg = new HAGatherReleaseTimeRequest( - token, timestampOnLeader, leaderId); - + token, timestampOnLeader, leaderId, newCommitCounter, + newCommitTime); + // Do not send message to self (leader is at index 0). for (int i = 1; i < joinedServiceIds.length; i++) { @@ -648,17 +692,7 @@ */ // Note: throws RejectedExecutionException if shutdown. futures.add(getExecutorService().submit( - new Callable<Void>() { - public Void call() throws Exception { - // Resolve joined service. - final HATXSGlue service = getService(serviceId); - // Message remote service. - // Note: NPE if [service] is gone. - service.gatherMinimumVisibleCommitTime(msg); - // Done. - return null; - } - })); + new StartGatherOnFollowerTask(serviceId, msg))); // // add to list of futures we will check. // remoteFutures[i] = rf; @@ -748,16 +782,26 @@ // Verify messaged services still // joined. assertServicesStillJoined(quorum); - - } catch (QuorumException ex) { - if (!barrier.isBroken()) { - - barrier.reset(); - + for (Future<Void> f : futures) { + if (f.isDone()) { + /* + * Note: If any follower fails + * on the RMI, then that is + * noticed here and the GATHER + * will fail on the leader. + * + * TODO This should be robust as + * long as a majority of the + * services succeed. Right now + * this will stop the GATHER if + * any service fails on the RMI. + */ + f.get(); + } } - } catch (RuntimeException ex) { + } catch (Throwable ex) { if (InnerCause.isInnerCause(ex, InterruptedException.class)) { @@ -766,25 +810,9 @@ return; } - - /* - * Something went wrong in the - * monitoring code. - */ - log.error(ex, ex); + logErrorAndResetBarrier(ex); - if (!barrier.isBroken()) { - - /* - * Force the barrier to break since - * we will no longer be monitoring - * the quorum state. - */ - barrier.reset(); - - } - } } @@ -827,18 +855,29 @@ // } } finally { + /* * Cancel local futures for RMI messages to followers. * * Note: Regardless of outcome or errors above, ensure that the * futures used to initiate the GatherTask on the followers are * cancelled. These are local Futures that do RMIs. The RMIs - * should not block when the execute on the follower. + * should not block when they execute on the follower. */ for (Future<Void> f : futures) { f.cancel(true/* mayInterruptIfRunning */); + + try { + f.get(); + } catch (CancellationException e) { + // Probably blocked on the RMI. + log.error(e, e); + } catch (ExecutionException e) { + // Probably error on the RMI. + log.error(e, e); + } } @@ -871,11 +910,13 @@ * action ourselves. E.g., in the thread that calls * barrier.reset()]. [Actually, this might not be a problem * for cases where the GatherTask is able to send back a - * mock IHANotifyReleaseTimeRequest message.] 
+ * mock IHANotifyReleaseTimeRequest message, only when we + * are interrupted by the Runnable above that is monitoring + * the quorum state for an invariant change.] */ - + log.error("Forcing barrier break"); - + barrier.reset(); } @@ -985,6 +1026,20 @@ // } // // } + + private void logErrorAndResetBarrier(final Throwable ex) { + + log.error(ex, ex); + + if (!barrier.isBroken()) { + + log.error("Forcing barrier break"); + + barrier.reset(); + + } + + } /** * Verify that the services that were messaged for the release time @@ -1157,9 +1212,12 @@ */ // Note: Executed on the leader. @Override - public IHANotifyReleaseTimeResponse updateReleaseTimeConsensus(final UUID[] joinedServiceIds, - final long timeout, final TimeUnit units) throws IOException, - InterruptedException, TimeoutException, BrokenBarrierException { + public IHANotifyReleaseTimeResponse updateReleaseTimeConsensus( + final long newCommitCounter, + final long newCommitTime, + final UUID[] joinedServiceIds, final long timeout, + final TimeUnit units) throws IOException, InterruptedException, + TimeoutException, BrokenBarrierException { final long begin = System.nanoTime(); final long nanos = units.toNanos(timeout); @@ -1168,8 +1226,8 @@ final long token = getQuorum().token(); if (haLog.isInfoEnabled()) - haLog.info("GATHER PROTOCOL: token=" + token - + ", joinedServiceIds=" + haLog.info("GATHER PROTOCOL: commitCounter=" + newCommitCounter + + ", token=" + token + ", joinedServiceIds=" + Arrays.toString(joinedServiceIds)); final BarrierState barrierState; @@ -1181,7 +1239,8 @@ getQuorum().assertLeader(token); if (!barrierRef.compareAndSet(null/* expectedValue */, - barrierState = new BarrierState(joinedServiceIds)/* newValue */)) { + barrierState = new BarrierState(newCommitCounter, + newCommitTime, joinedServiceIds)/* newValue */)) { throw new IllegalStateException(); @@ -1391,7 +1450,8 @@ * @return The new message. */ protected IHANotifyReleaseTimeRequest newHANotifyReleaseTimeRequest( - final UUID serviceId) { + final UUID serviceId, final long newCommitCounter, + final long newCommitTime) { // On AbstractTransactionService. final long effectiveReleaseTimeForHA = getEffectiveReleaseTimeForHA(); @@ -1409,7 +1469,8 @@ final long now = newConsensusProtocolTimestamp(); final IHANotifyReleaseTimeRequest req = new HANotifyReleaseTimeRequest( - serviceId, commitTime, commitCounter, now, false/* isMock */); + serviceId, commitTime, commitCounter, now, false/* isMock */, + newCommitCounter, newCommitTime); if (log.isTraceEnabled()) log.trace("releaseTime=" + getReleaseTime()// @@ -1571,7 +1632,20 @@ if (!quorumService.isFollower(token)) throw new QuorumException(); - final IHANotifyReleaseTimeRequest req2 = newHANotifyReleaseTimeRequest(serviceId); + final long localCommitCounter = getRootBlockView() + .getCommitCounter(); + + if (req.getNewCommitCounter() != localCommitCounter + 1) { + throw new RuntimeException( + "leader is preparing for commitCounter=" + + req.getNewCommitCounter() + + ", but follower is at localCommitCounter=" + + localCommitCounter); + } + + final IHANotifyReleaseTimeRequest req2 = newHANotifyReleaseTimeRequest( + serviceId, req.getNewCommitCounter(), + req.getNewCommitTime()); /* * RMI to leader. 
@@ -1668,7 +1742,10 @@ final IHANotifyReleaseTimeRequest resp = new HANotifyReleaseTimeRequest( serviceId, 0L/* pinnedCommitTime */, 0L/* pinnedCommitCounter */, - nextTimestamp()/* timestamp */, true/* isMock */); + nextTimestamp()/* timestamp */, + true/* isMock */, + req.getNewCommitCounter(), + req.getNewCommitTime()); log.warn("Sending mock response for gather protocol: cause=" + t); // Will block until barrier breaks on leader. @@ -1705,13 +1782,6 @@ * Note: We MUST NOT contend for the {@link #barrierLock} here. That * lock is held by the Thread that invoked * {@link #updateReleaseTimeConsensus()}. - * - * TODO HA TXS: We should ensure that the [req] is for the same gather() - * request as this barrier instance. That will let us detect a service - * that responds late (after a transient disconnect) when the leader has - * moved on to another commit. See BarrierState#token for more on this. - * [Note that [req] can not safely be [null] since the follower must - * self-report its serviceId.] */ @Override public IHANotifyReleaseTimeResponse notifyEarliestCommitTime( @@ -1745,6 +1815,25 @@ getQuorum().assertLeader(barrierState.token); + if (barrierState.newCommitCounter != req.getNewCommitCounter()) { + /* + * Response is for the wrong GATHER request. + */ + throw new RuntimeException( + "Wrong newCommitCounter: expected=" + + barrierState.newCommitCounter + + ", actual=" + req.getNewCommitCounter()); + } + + if (barrierState.newCommitTime != req.getNewCommitTime()) { + /* + * Response is for the wrong GATHER request. + */ + throw new RuntimeException("Wrong newCommitTime: expected=" + + barrierState.newCommitTime + ", actual=" + + req.getNewCommitTime()); + } + // ServiceId of the follower (NPE if req is null). final UUID followerId = req.getServiceUUID(); Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java 2013-08-14 14:34:48 UTC (rev 7289) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java 2013-08-14 16:04:43 UTC (rev 7290) @@ -82,6 +82,11 @@ * Coordinate the update of the <i>releaseTime</i> on each service that is * joined with the met quorum. * + * @param newCommitCounter + * The commit counter that will be assigned to the new commit + * point. + * @param newCommitTime + * The commit time that will be assigned to the new commit point. * @param joinedServiceIds * The services that are joined with the met quorum as of an * atomic decision point in {@link AbstractJournal#commitNow()}. @@ -91,6 +96,8 @@ * The units for that timeout. */ abstract public IHANotifyReleaseTimeResponse updateReleaseTimeConsensus( + final long newCommitCounter, + final long newCommitTime, final UUID[] joinedServiceIds, final long timeout, final TimeUnit units) throws IOException, TimeoutException, InterruptedException, Exception; |
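To make the r7290 change easier to follow, here is a minimal, self-contained sketch of the tagging pattern it introduces. All class and method names below are hypothetical stand-ins; only the two validation rules mirror the committed code: the follower rejects a gather request unless it is exactly one commit point behind, and the leader rejects any response that does not echo the commit point it is preparing. Either failure aborts the GATHER rather than letting a stale participant influence the release time consensus.

{{{
/**
 * Standalone sketch of the GATHER commit-point tagging from r7290.
 * Hypothetical names; only the validation logic mirrors the commit.
 */
public class GatherValidationSketch {

    /** Immutable request, stamped by the leader. */
    static final class GatherRequest {
        final long newCommitCounter; // counter of the commit point being prepared
        final long newCommitTime;    // commit time assigned by the leader
        GatherRequest(long newCommitCounter, long newCommitTime) {
            this.newCommitCounter = newCommitCounter;
            this.newCommitTime = newCommitTime;
        }
    }

    /** Follower-side check: it must be at (newCommitCounter - 1). */
    static void validateOnFollower(GatherRequest req, long localCommitCounter) {
        if (req.newCommitCounter != localCommitCounter + 1) {
            throw new RuntimeException("leader is preparing for commitCounter="
                    + req.newCommitCounter
                    + ", but follower is at localCommitCounter="
                    + localCommitCounter);
        }
    }

    /** Leader-side check: a response must echo the same commit point. */
    static void validateOnLeader(GatherRequest expected, long respCounter,
            long respTime) {
        if (expected.newCommitCounter != respCounter)
            throw new RuntimeException("Wrong newCommitCounter: expected="
                    + expected.newCommitCounter + ", actual=" + respCounter);
        if (expected.newCommitTime != respTime)
            throw new RuntimeException("Wrong newCommitTime: expected="
                    + expected.newCommitTime + ", actual=" + respTime);
    }

    public static void main(String[] args) {
        final GatherRequest req = new GatherRequest(783L,
                System.currentTimeMillis());
        validateOnFollower(req, 782L);                  // ok: one commit behind
        validateOnLeader(req, 783L, req.newCommitTime); // ok: response matches
    }
}
}}}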
From: <tho...@us...> - 2013-08-14 14:35:07
|
Revision: 7289 http://bigdata.svn.sourceforge.net/bigdata/?rev=7289&view=rev Author: thompsonbry Date: 2013-08-14 14:34:48 +0000 (Wed, 14 Aug 2013) Log Message: ----------- - Why is the barrier count not reflecting the leader's consensus vote? nparties 2 versus 3? (because only follower responses are counted here; I have renamed the responses field as followerResponses to clarify this and also updated the logged message). - What is the impact of the mock notify message? That the consensus release time can not advance? (We now explicitly mark the mock GATHER responses and then ignore them on the leader. Followers that provide a mock GATHER response will vote NO for the PREPARE. Also, added a unit test for an ABC() simultaneous restart once the services already have some data and are not at commitCounter=0.) - Why is the leader reporting that it is forcing a barrier break in messageFollowers()? (Not sure. Added a workaround using "consensus==null" as the test condition to drive the barrier.reset() call.) - What the hell happened to the NotReady exception? (It will cause the follower to fail in prepare2Phase, but we never get past the flush() in this test run.) - Why did the commit never finish? (We were stuck in flush(). Not sure why.) - Modified QuorumPipelineImpl.retrySend() to log @ WARN if we have to do a retrySend(). (Note that it will also log @ ERROR if RobustReplicateTask.call() is unable to send whether or not we then transition into retrySend()). See #720 (HA3 simultaneous start failure) Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHANotifyReleaseTimeRequest.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2013-08-14 12:47:41 UTC (rev 7288) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2013-08-14 14:34:48 UTC (rev 7289) @@ -1300,7 +1300,7 @@ * attempting to robustly replicate a write into a single method. This * was done in order to concentrate any conditional logic and design * rationale into a single method. - * + * <p> * Note: IFF we allow non-leaders to replicate HALog messages then this * assert MUST be changed to verify that the quorum token remains valid * and that this service remains joined with the met quorum, i.e., @@ -1499,10 +1499,15 @@ try { - if (log.isTraceEnabled()) - log.trace("Leader will send: " + b.remaining() + if (log.isInfoEnabled() || retryCount > 0) { + final String msg2 = "Leader will send: " + b.remaining() + " bytes, retryCount=" + retryCount + ", req=" - + req + ", msg=" + msg); + + req + ", msg=" + msg; + if (retryCount > 0) + log.warn(msg2); + else + log.info(msg2); + } // retest while holding lock before sending the message.
assertQuorumState(); Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java 2013-08-14 12:47:41 UTC (rev 7288) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java 2013-08-14 14:34:48 UTC (rev 7289) @@ -36,16 +36,22 @@ private final long pinnedCommitTime; private final long pinnedCommitCounter; private final long timestamp; + private final boolean isMock; public HANotifyReleaseTimeRequest(final UUID serviceUUID, final long pinnedCommitTime, final long pinnedCommitCounter, - final long timestamp) { + final long timestamp, final boolean isMock) { if (serviceUUID == null) throw new IllegalArgumentException(); + if (pinnedCommitTime < 0) + throw new IllegalArgumentException(); + if (pinnedCommitCounter < 0) + throw new IllegalArgumentException(); this.serviceUUID = serviceUUID; this.pinnedCommitTime = pinnedCommitTime; this.pinnedCommitCounter = pinnedCommitCounter; this.timestamp = timestamp; + this.isMock = isMock; } @Override @@ -53,7 +59,7 @@ return super.toString() + "{serviceUUID=" + serviceUUID + ",pinnedCommitTime=" + pinnedCommitTime + ",pinnedCommitCounter=" + pinnedCommitCounter + ",timestamp=" - + timestamp + "}"; + + timestamp + ", isMock=" + isMock + "}"; } @Override @@ -76,4 +82,9 @@ return timestamp; } + @Override + public boolean isMock() { + return isMock; + } + } Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHANotifyReleaseTimeRequest.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHANotifyReleaseTimeRequest.java 2013-08-14 12:47:41 UTC (rev 7288) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHANotifyReleaseTimeRequest.java 2013-08-14 14:34:48 UTC (rev 7289) @@ -69,4 +69,20 @@ */ public long getTimestamp(); + /** + * Mock responses are used when a follow is unable to provide a correct + * response (typically because the follower is not yet HAReady and hence is + * not able to participate in the gather). The mock responses preserves + * liveness since the GATHER protocol will terminate quickly. By marking the + * response as a mock object, the leader can differentiate mock responses + * from valid responses and discard the mock responeses. If the GATHER task + * on the follower sends a mock response to the leader, then it will also + * have thrown an exception out of its GatherTask which will prevent the + * follower from voting YES on the PREPARE message for that 2-phase commit. 
+ * + * @see <href="https://sourceforge.net/apps/trac/bigdata/ticket/720" > HA3 + * simultaneous service start failure </a> + */ + public boolean isMock(); + } Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-14 12:47:41 UTC (rev 7288) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-14 14:34:48 UTC (rev 7289) @@ -388,19 +388,19 @@ * @see InnerJournalTransactionService#notifyEarliestCommitTime(IHANotifyReleaseTimeRequest) * @see GatherTask */ - final private Map<UUID, IHANotifyReleaseTimeRequest> responses = new ConcurrentHashMap<UUID, IHANotifyReleaseTimeRequest>(); + final private Map<UUID, IHANotifyReleaseTimeRequest> followerResponses = new ConcurrentHashMap<UUID, IHANotifyReleaseTimeRequest>(); /** - * The value from {@link #responses} associated with the earliest commit + * The value from {@link #followerResponses} associated with the earliest commit * point. This is basis for the "censensus" across the services. */ private IHANotifyReleaseTimeRequest minimumResponse = null; /** * The consensus value. This is a restatement of the data in from the - * {@link #minimumResponse}. + * {@link #minimumResponse}. This is set by {@link #run()}. */ - protected IHANotifyReleaseTimeResponse consensus = null; + protected volatile IHANotifyReleaseTimeResponse consensus = null; // private Quorum<HAGlue,QuorumService<HAGlue>> getQuorum() { // @@ -479,68 +479,92 @@ public void run() { try { - - if (log.isInfoEnabled()) - log.info("leader: " + leadersValue); - - // This is the timestamp from the BarrierState ctor. - final long timeLeader = leadersValue.getTimestamp(); - - // This is the timestamp for right now. - final long timeNow = newConsensusProtocolTimestamp(); - -// // The local clock must be moving forward. -// assertBefore(timeLeader, timeNow); - // Start with the leader's value (from ctor). - minimumResponse = leadersValue; + if (log.isInfoEnabled()) + log.info("leader: " + leadersValue); - for (IHANotifyReleaseTimeRequest response : responses.values()) { + // This is the timestamp from the BarrierState ctor. + final long timeLeader = leadersValue.getTimestamp(); - if (log.isTraceEnabled()) - log.trace("follower: " + response); + // This is the timestamp for right now. + final long timeNow = newConsensusProtocolTimestamp(); - final UUID followerId = response.getServiceUUID(); - - if (minimumResponse.getPinnedCommitCounter() > response - .getPinnedCommitCounter()) { + // // The local clock must be moving forward. + // assertBefore(timeLeader, timeNow); - minimumResponse = response; + // Start with the leader's value (from ctor). + minimumResponse = leadersValue; - } + for (IHANotifyReleaseTimeRequest response : followerResponses + .values()) { - /* - * Verify that the timestamp from the ctor is BEFORE the - * timestamp assigned by the follower in the GatherTask. - */ - assertBefore(leaderId, followerId, timeLeader, - response.getTimestamp()); + if (log.isTraceEnabled()) + log.trace("follower: " + response); - /* - * Verify that the timestamp from the GatherTask on the follower - * is before the timestamp obtained at the top of this run() - * method. - */ - assertBefore(followerId, leaderId, response.getTimestamp(), - timeNow); + if (response.isMock()) { - } + /** + * The mock response should not have any influence on + * the consensus release time. 
The follower provides the + * mock response when it is unable to execute the + * GatherTask correctly, typically because it is not yet + * HAReady. The mock response preserves liveness for the + * GATHER protocol. The follower that provided the mock + * response will vote NO for the PREPARE because it's + * GatherTask will have thrown out an exception. + * + * @see <href= + * "https://sourceforge.net/apps/trac/bigdata/ticket/720" + * > HA3 simultaneous service start failure </a> + */ - // Restate the consensus as an appropriate message object. - consensus = new HANotifyReleaseTimeResponse( - minimumResponse.getPinnedCommitTime(), - minimumResponse.getPinnedCommitCounter()); + log.warn("Ignoring mock response: " + response); - if (log.isInfoEnabled()) - log.info("consensus: " + consensus); + continue; - } catch(Throwable t) { - + } + + final UUID followerId = response.getServiceUUID(); + + if (minimumResponse.getPinnedCommitCounter() > response + .getPinnedCommitCounter()) { + + minimumResponse = response; + + } + + /* + * Verify that the timestamp from the ctor is BEFORE the + * timestamp assigned by the follower in the GatherTask. + */ + assertBefore(leaderId, followerId, timeLeader, + response.getTimestamp()); + + /* + * Verify that the timestamp from the GatherTask on the + * follower is before the timestamp obtained at the top of + * this run() method. + */ + assertBefore(followerId, leaderId, response.getTimestamp(), + timeNow); + + } + + // Restate the consensus as an appropriate message object. + consensus = new HANotifyReleaseTimeResponse( + minimumResponse.getPinnedCommitTime(), + minimumResponse.getPinnedCommitCounter()); + + if (log.isInfoEnabled()) + log.info("consensus: " + consensus); + + } catch (Throwable t) { + // Set the cause. cause = t; - + } - + } /** @@ -811,21 +835,33 @@ * cancelled. These are local Futures that do RMIs. The RMIs * should not block when the execute on the follower. */ + for (Future<Void> f : futures) { + f.cancel(true/* mayInterruptIfRunning */); + } - if (!barrier.isBroken()) { + + if (consensus == null) { + /* * If there were any followers that did not message the - * leader and cause the barrier to be decremented, then we - * need to decrement the barrier for those followers now in - * order for it to break. + * leader and cause the barrier to be decremented and hence + * the [consensus] to become defined, then we need to + * decrement the barrier for those followers now in order + * for it to break. * * There is no method to decrement by a specific number * (unlike a semaphore), but you can reset() the barrier, * which will cause a BrokenBarrierException for all Threads * waiting on the barrier. * + * Note: It appears that [barrier.isBoken()] always reports + * [false] here. Hence, the test was changed to + * [consensus==null]. See <href= + * "https://sourceforge.net/apps/trac/bigdata/ticket/720" > + * HA3 simultaneous service start failure </a> + * * FIXME HA TXS: A reset() here does not allow us to proceed * with the consensus protocol unless all services * "vote yes". Thus, a single node failure during the @@ -833,10 +869,15 @@ * fail. [Actually, we could use getNumberWaiting(). If it * is a bare majority, then we could take the barrier break * action ourselves. E.g., in the thread that calls - * barrier.reset()]. + * barrier.reset()]. [Actually, this might not be a problem + * for cases where the GatherTask is able to send back a + * mock IHANotifyReleaseTimeRequest message.] 
*/ + log.error("Forcing barrier break"); + barrier.reset(); + } }// finally @@ -1368,7 +1409,7 @@ final long now = newConsensusProtocolTimestamp(); final IHANotifyReleaseTimeRequest req = new HANotifyReleaseTimeRequest( - serviceId, commitTime, commitCounter, now); + serviceId, commitTime, commitCounter, now, false/* isMock */); if (log.isTraceEnabled()) log.trace("releaseTime=" + getReleaseTime()// @@ -1613,17 +1654,21 @@ if (!didNotifyLeader) { - /* + /** * Send mock response to the leader so it does not block - * forever waiting for our response. The mock response MUST - * include our correct serviceId. + * forever waiting for our response. The mock response + * MUST include our correct serviceId. + * + * @see <href= + * "https://sourceforge.net/apps/trac/bigdata/ticket/720" + * > HA3 simultaneous service start failure </a> */ try { final IHANotifyReleaseTimeRequest resp = new HANotifyReleaseTimeRequest( serviceId, 0L/* pinnedCommitTime */, - 1L/* pinnedCommitCounter */, - nextTimestamp()/* timestamp */); + 0L/* pinnedCommitCounter */, + nextTimestamp()/* timestamp */, true/* isMock */); log.warn("Sending mock response for gather protocol: cause=" + t); // Will block until barrier breaks on leader. @@ -1634,6 +1679,10 @@ } + /* + * This exception will force PREPARE to fail on this service + * when it checks the GatherTask's Future. + */ throw new Exception(t); } finally { @@ -1700,7 +1749,7 @@ final UUID followerId = req.getServiceUUID(); // Make a note of the message from this follower. - barrierState.responses.put(followerId, req); + barrierState.followerResponses.put(followerId, req); } catch(RuntimeException e) { @@ -1729,14 +1778,19 @@ try { if (haLog.isInfoEnabled()) { - haLog.info("Awaiting barrier: #responses=" - + barrierState.responses.size() + ", #parties=" + haLog.info("Awaiting barrier: #followerResponses=" + + barrierState.followerResponses.size() + ", #parties=" + barrierState.barrier.getParties() + ", #joinedUUIDs=" + barrierState.joinedServiceIds.length); } } finally { - // follower blocks on Thread on the leader here. + /* + * Follower blocks on Thread on the leader here. + * + * Note: This will thrown InterruptedException -or- + * BarrierBrokenException if the barrier is reset(). + */ barrierState.barrier.await(); } Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-08-14 12:47:41 UTC (rev 7288) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-08-14 14:34:48 UTC (rev 7289) @@ -2128,16 +2128,37 @@ */ final HAGlue serverA, serverB, serverC; - /** - * Start of 3 HA services (this happens in the ctor). - * - * @param sequential - * True if the startup should be sequential or false - * if services should start concurrently. - * @throws Exception - */ - public ABC(final boolean sequential) throws Exception { + /** + * Start of 3 HA services (this happens in the ctor). + * + * @param sequential + * True if the startup should be sequential or false + * if services should start concurrently. + * @throws Exception + */ + public ABC(final boolean sequential) + throws Exception { + this(true/* sequential */, true/* newServiceStarts */); + + } + + /** + * Start of 3 HA services (this happens in the ctor). 
+ * + * @param sequential + * True if the startup should be sequential or false if + * services should start concurrently. + * @param newServiceStarts + * When <code>true</code> the services are new, the database + * should be at <code>commitCounter:=0</code> and the + * constructor will check for the implicit create of the + * default KB. + * @throws Exception + */ + public ABC(final boolean sequential, final boolean newServiceStarts) + throws Exception { + if (sequential) { final HAGlue[] services = startSequenceABC(); @@ -2171,8 +2192,10 @@ // wait for the quorum to fully meet. awaitFullyMetQuorum(); - // wait for the initial commit point (KB create). - awaitCommitCounter(1L, serverA, serverB, serverC); + if(newServiceStarts) { + // wait for the initial commit point (KB create). + awaitCommitCounter(1L, serverA, serverB, serverC); + } } Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-08-14 12:47:41 UTC (rev 7288) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-08-14 14:34:48 UTC (rev 7289) @@ -512,6 +512,67 @@ } /** + * Unit test of the ability to go through a simultaneous restart of all + * services once those services are no longer at commit point 0. Two + * services will meet on the lastCommitTime. The third will need to RESYNC + * and then join. This test provides converage of the RESYNC and JOIN + * transitions when the database is not empty. + */ + public void testStartABC_RestartAllSimultaneous() throws Exception { + + // Start simultaneous. + ABC servers = new ABC(true); + + /* + * Now go through a commit point with a met quorum. + */ + simpleTransaction(); + + // Current commit point. + final long lastCommitCounter2 = 2; + + // Await 2nd commit point on all services. + awaitCommitCounter(lastCommitCounter2, servers.serverA, + servers.serverB, servers.serverC); + + /* + * Shutdown ALL services. + */ + + shutdownA(); + shutdownB(); + shutdownC(); + + /* + * Start simultaneous (again). + * + * Note: Since these are not new service starts, we are only awaiting + * the fully met quorum. Hence I am NOT re-verifying that the services + * are at the same commit point (they should be of course) because I + * would like to promote a data race for the UPDATE with the MEET + + * RESYNC. + */ + servers = new ABC(true, false/* newServiceStarts */); + +// // Should be at the same commit point on all services. +// awaitCommitCounter(lastCommitCounter2, servers.serverA, +// servers.serverB, servers.serverC); + + /* + * Now go through a commit point with a met quorum. + */ + simpleTransaction(); + + // Current commit point. + final long lastCommitCounter3 = 3; + + // Await 3rd commit point on all services. + awaitCommitCounter(lastCommitCounter3, servers.serverA, + servers.serverB, servers.serverC); + + } + + /** + * TWO (2) committed transactions then at 3000ms delay between each + * subsequent transaction. + * <P> |
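A standalone sketch (hypothetical names) of the mock-response rule described in the r7289 log message above: a follower that cannot gather still answers, so the barrier on the leader breaks promptly, but its answer is flagged isMock and is discarded when the leader computes the minimum pinned commit point. Liveness is preserved without letting a not-yet-HAReady service drag the consensus release time down.

{{{
import java.util.Arrays;
import java.util.List;

/**
 * Standalone sketch of the mock-response handling from r7289.
 * Hypothetical names; only the filtering rule mirrors the commit.
 */
public class MockResponseSketch {

    static final class Response {
        final long pinnedCommitCounter;
        final boolean isMock;
        Response(long pinnedCommitCounter, boolean isMock) {
            this.pinnedCommitCounter = pinnedCommitCounter;
            this.isMock = isMock;
        }
    }

    /** Leader-side: start from the leader's own value, ignore mocks. */
    static Response minimumResponse(Response leadersValue,
            List<Response> followers) {
        Response min = leadersValue;
        for (Response r : followers) {
            if (r.isMock)
                continue; // preserves liveness, but has no influence on the minimum
            if (r.pinnedCommitCounter < min.pinnedCommitCounter)
                min = r;
        }
        return min;
    }

    public static void main(String[] args) {
        final Response leader = new Response(10L, false);
        final List<Response> followers = Arrays.asList(
                new Response(9L, false), // real response: pins an earlier commit point
                new Response(0L, true)); // mock: follower was not HAReady
        // Prints 9, not 0: the mock response is discarded.
        System.out.println(minimumResponse(leader, followers).pinnedCommitCounter);
    }
}
}}}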
From: <tho...@us...> - 2013-08-14 12:47:52
|
Revision: 7288 http://bigdata.svn.sourceforge.net/bigdata/?rev=7288&view=rev Author: thompsonbry Date: 2013-08-14 12:47:41 +0000 (Wed, 14 Aug 2013) Log Message: ----------- The change set above led to a code path where replicateAndApplyWriteSet() could fail to transition to RunMet as described for this code path: {{{ /* * This can happen if there is a data race with a live write * that is the first write cache block for the write set * that that we would replicate from the ResyncTask. In this * case, we have lost the race to the live write and this * service has already joined as a follower. We can safely * return here since the test in this if() is the same as * the condition variable in the loop for the ResyncTask. * * @see #resyncTransitionToMetQuorum() */ if (journal.getHAReady() != token) { /* * Service must be HAReady before exiting RESYNC * normally. */ throw new AssertionError(); } }}} The code used to do a return at this point. The loop in the caller would then terminate since the service was joined. Since the loop in the caller is now <code>while(true) {...}</code>, this code path now needs to do an explicit transition into another run state (RunMet). Note that the code path does verify that the service is not only JOINED but also HAReady. Therefore, all we need to do is add the transition to RunMet: {{{ // Transition to RunMet. enterRunState(new RunMetTask(token, leaderId)); }}} @see #720 (HA3 Simultaneous start failure) Modified Paths: -------------- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-14 12:09:30 UTC (rev 7287) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-14 12:47:41 UTC (rev 7288) @@ -2479,6 +2479,9 @@ * The quorum leader (RMI interface). This is fixed until the * quorum breaks. */ + + final UUID leaderId = getQuorum().getLeaderId(); + final S leader = getLeader(token); /* @@ -2498,7 +2501,8 @@ .getCommitCounter(); // Replicate and apply the next write set - replicateAndApplyWriteSet(leader, token, commitCounter + 1); + replicateAndApplyWriteSet(leaderId, leader, token, + commitCounter + 1); } @@ -2526,10 +2530,10 @@ * @throws ExecutionException * @throws InterruptedException */ - private void replicateAndApplyWriteSet(final S leader, - final long token, final long closingCommitCounter) - throws FileNotFoundException, IOException, - InterruptedException, ExecutionException { + private void replicateAndApplyWriteSet(final UUID leaderId, + final S leader, final long token, + final long closingCommitCounter) throws FileNotFoundException, + IOException, InterruptedException, ExecutionException { if (leader == null) throw new IllegalArgumentException(); @@ -2651,9 +2655,13 @@ */ throw new AssertionError(); } + + // Transition to RunMet. + enterRunState(new RunMetTask(token, leaderId)); - return; - + // Force immediate exit of the resync protocol. + throw new InterruptedException(); + } /* |
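A compact, self-contained illustration of the control-flow point r7288 fixes. This is not the HAJournalServer code (the names and the state machine here are hypothetical stand-ins): once the caller loops with while(true), leaving RESYNC requires an explicit run-state transition followed by an interrupt, since a plain return no longer terminates anything.

{{{
/**
 * Sketch of the r7288 control flow: exit a while(true) resync loop via an
 * explicit state transition plus an interrupt. Hypothetical names.
 */
public class RunStateSketch {

    enum RunState { RESYNC, RUN_MET }

    private volatile RunState runState = RunState.RESYNC;

    /** Models enterRunState(): record the new state (the real code also queues a task). */
    void enterRunState(RunState newState) {
        runState = newState;
    }

    /** Models the tail of replicateAndApplyWriteSet() once the service caught up. */
    void caughtUpWithLiveWrites() throws InterruptedException {
        enterRunState(RunState.RUN_MET); // transition to RunMet
        throw new InterruptedException(); // force immediate exit of the resync loop
    }

    void resyncLoop() {
        try {
            while (true) { // no normal exit from this loop
                caughtUpWithLiveWrites(); // would otherwise keep resyncing forever
            }
        } catch (InterruptedException e) {
            // expected: the transition above terminates the loop
        }
        System.out.println("exited resync in state " + runState); // RUN_MET
    }

    public static void main(String[] args) {
        new RunStateSketch().resyncLoop();
    }
}
}}}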
From: <tho...@us...> - 2013-08-14 12:09:36
|
Revision: 7287 http://bigdata.svn.sourceforge.net/bigdata/?rev=7287&view=rev Author: thompsonbry Date: 2013-08-14 12:09:30 +0000 (Wed, 14 Aug 2013) Log Message: ----------- Added logging at the start of the GATHER protocol. Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-14 12:00:52 UTC (rev 7286) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-14 12:09:30 UTC (rev 7287) @@ -50,6 +50,8 @@ import org.apache.log4j.Logger; +import cern.colt.Arrays; + import com.bigdata.bfs.BigdataFileSystem; import com.bigdata.bfs.GlobalFileSystemHelper; import com.bigdata.bop.engine.QueryEngine; @@ -1123,6 +1125,11 @@ long remaining = nanos; final long token = getQuorum().token(); + + if (haLog.isInfoEnabled()) + haLog.info("GATHER PROTOCOL: token=" + token + + ", joinedServiceIds=" + + Arrays.toString(joinedServiceIds)); final BarrierState barrierState; |
From: <tho...@us...> - 2013-08-14 12:00:59
|
Revision: 7286 http://bigdata.svn.sourceforge.net/bigdata/?rev=7286&view=rev Author: thompsonbry Date: 2013-08-14 12:00:52 +0000 (Wed, 14 Aug 2013) Log Message: ----------- We have identified a problem where ResyncTask.doRun() could exit normally if the service entered the JOIN[] for the ZKQuorum even though it had not successfully set the HAReady token. @see #720 (Start simultaneous failure) Modified Paths: -------------- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-13 17:53:19 UTC (rev 7285) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-14 12:00:52 UTC (rev 7286) @@ -2481,19 +2481,18 @@ */ final S leader = getLeader(token); - // Until joined with the met quorum. - while (!getQuorum().getMember().isJoinedMember(token)) { + /* + * Loop until joined with the met quorum (and HAReady). + * + * Note: The transition will occur atomically either when we + * catch up with the live write or at a commit point. + * + * Note: This loop will go through an abnormal exit if the + * quorum breaks or reforms (thrown error). The control when + * then pass through the ErrorTask and back into SeekConsensus. + */ + while (true) { - // This service should not be joined yet (HAReady==-1). - final long haReady = journal.getHAReady(); - - if (haReady != Quorum.NO_QUORUM) { - - throw new AssertionError( - "HAReady: Expecting NO_QUOURM, not " + haReady); - - } - // The current commit point on the local store. final long commitCounter = journal.getRootBlockView() .getCommitCounter(); @@ -2502,10 +2501,7 @@ replicateAndApplyWriteSet(leader, token, commitCounter + 1); } - - // Done - return null; - + } } // class ResyncTask @@ -2647,15 +2643,32 @@ * * @see #resyncTransitionToMetQuorum() */ - + + if (journal.getHAReady() != token) { + /* + * Service must be HAReady before exiting RESYNC + * normally. + */ + throw new AssertionError(); + } + return; + } /* * Since we are not joined, the HAReady token must not have been * set. */ - assert journal.getHAReady() == Quorum.NO_QUORUM; + if (journal.getHAReady() != Quorum.NO_QUORUM) { + /* + * The HAReady token is set by setQuorumToken() and this + * should have been done atomically in runWithBarrierLock(). + * Thus, it is a problem if the HAReady token is set here to + * any valid token value. + */ + throw new AssertionError(); + } journal.getHALogNexus().disableHALog(); journal.getHALogNexus().createHALog(openRootBlock); } finally { |
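A standalone sketch (hypothetical names, simplified tokens) of the invariant behind r7286: being in the zookeeper join order is not enough; RESYNC may only exit normally once the HAReady token matches the quorum token, which per the diff above the real code sets via setQuorumToken() under a barrier lock.

{{{
/**
 * Sketch of the r7286 HAReady guard. Hypothetical names; only the
 * exit-condition check mirrors the commit.
 */
public class HAReadySketch {

    static final long NO_QUORUM = -1L;

    private volatile long haReadyToken = NO_QUORUM;

    long getHAReady() { return haReadyToken; }

    /** Called where RESYNC would exit normally. */
    void assertExitIsSafe(long token) {
        if (getHAReady() != token) {
            // Service must be HAReady before exiting RESYNC normally.
            throw new AssertionError("HAReady=" + getHAReady()
                    + ", token=" + token);
        }
    }

    public static void main(String[] args) {
        final HAReadySketch s = new HAReadySketch();
        final long token = 7L;
        try {
            s.assertExitIsSafe(token); // fails: joined in zk, but HAReady not yet set
        } catch (AssertionError expected) {
            System.out.println("caught: " + expected.getMessage());
        }
        s.haReadyToken = token; // setQuorumToken() would do this atomically
        s.assertExitIsSafe(token); // now the normal exit is allowed
    }
}
}}}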
From: <tho...@us...> - 2013-08-13 17:53:31
|
Revision: 7285 http://bigdata.svn.sourceforge.net/bigdata/?rev=7285&view=rev Author: thompsonbry Date: 2013-08-13 17:53:19 +0000 (Tue, 13 Aug 2013) Log Message: ----------- Removed import for javadoc from the wrong package space. Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java Modified: branches/READ_CACHE2/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java =================================================================== --- branches/READ_CACHE2/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java 2013-08-13 17:51:00 UTC (rev 7284) +++ branches/READ_CACHE2/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java 2013-08-13 17:53:19 UTC (rev 7285) @@ -27,6 +27,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; +import java.nio.channels.Selector; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; @@ -41,7 +42,6 @@ import com.bigdata.util.ChecksumError; import com.bigdata.util.ChecksumUtility; import com.bigdata.util.InnerCause; -import com.sun.corba.se.pept.transport.Selector; /** * Test the raw socket protocol implemented by {@link HASendService} and |
From: <tho...@us...> - 2013-08-13 17:51:19
|
Revision: 7284 http://bigdata.svn.sourceforge.net/bigdata/?rev=7284&view=rev Author: thompsonbry Date: 2013-08-13 17:51:00 +0000 (Tue, 13 Aug 2013) Log Message: ----------- Added logic to tarball the HAtest results for archiving in CI runs. Modified Paths: -------------- branches/READ_CACHE2/build.xml Modified: branches/READ_CACHE2/build.xml =================================================================== --- branches/READ_CACHE2/build.xml 2013-08-13 13:48:09 UTC (rev 7283) +++ branches/READ_CACHE2/build.xml 2013-08-13 17:51:00 UTC (rev 7284) @@ -2167,6 +2167,9 @@ <!-- Archive the generated HTML report. --> <tar destfile="${test.results.dir}/report.tgz" basedir="${test.results.dir}/report" compression="gzip"/> + <!-- Archive the HA test suite output logs. --> + <tar destfile="${test.results.dir}/HAtest-report.tgz" basedir="${HAtest.dir}" compression="gzip"/> + </target> <target name="clean-sparql-test-suite" description="delete the files unpacked by the Sesame SPARQL test suite."> |
From: <tho...@us...> - 2013-08-13 13:48:19
|
Revision: 7283 http://bigdata.svn.sourceforge.net/bigdata/?rev=7283&view=rev Author: thompsonbry Date: 2013-08-13 13:48:09 +0000 (Tue, 13 Aug 2013) Log Message: ----------- Added logic to clear out the HA test results before a CI junit run. Modified Paths: -------------- branches/READ_CACHE2/build.xml Modified: branches/READ_CACHE2/build.xml =================================================================== --- branches/READ_CACHE2/build.xml 2013-08-12 17:13:31 UTC (rev 7282) +++ branches/READ_CACHE2/build.xml 2013-08-13 13:48:09 UTC (rev 7283) @@ -1968,10 +1968,16 @@ <echo> </echo> + <!-- Clear out the old test results. --> <property name="test.results.dir" location="${classes.test.dir}/test-results" /> <delete dir="${test.results.dir}" quiet="true" /> <mkdir dir="${test.results.dir}" /> + <!-- Clear out the old HA test suite logs. --> + <property name="HAtest.dir" location="benchmark/CI-HAJournal-1" /> + <delete dir="${HAtest.dir}" quiet="true" /> + <mkdir dir="${HATest.dir}" /> + <condition property="testClass" value="${testName}"> <isset property="testName" /> </condition> |
From: <tho...@us...> - 2013-08-12 17:13:44
|
Revision: 7282 http://bigdata.svn.sourceforge.net/bigdata/?rev=7282&view=rev Author: thompsonbry Date: 2013-08-12 17:13:31 +0000 (Mon, 12 Aug 2013) Log Message: ----------- - Removed QuorumService.didMeet(). This method was already deprecated and not used. - AbstractJournal.setQuorumToken(). Further simplified this method. In particular, if the service was not HAReady and the quorum token was unchanged, then this condition does not cause an AssertionError to be thrown. - AbstractJournal: An issue was identified where a follower was failing to wait on the GatherTask before checking to see if the local release time was consistent with the release time in the PREPARE message. This could cause the following exception: {{{ ERROR: 5146164 2013-08-09 14:31:20,395 com.bigdata.rdf.sail.webapp.BigdataRDFContext.queryService15 com.bigdata.ha.QuorumCommitImpl.prepare2Phase(QuorumCommitImpl.java:375): java.util.concurrent.ExecutionException: java.lang.AssertionError: Local service does not agree with consensusReleaseTime: localReleaseTime=1376073078564, expectedReleaseTime=1376073078984, consensusReleaseTime=com.bigdata.ha.msg.HANotifyReleaseTimeResponse@287ca2ca{commitTime=1376073078985,commitCounter=782} java.util.concurrent.ExecutionException: java.lang.AssertionError: Local service does not agree with consensusReleaseTime: localReleaseTime=1376073078564, expectedReleaseTime=1376073078984, consensusReleaseTime=com.bigdata.ha.msg.HANotifyReleaseTimeResponse@287ca2ca{commitTime=1376073078985,commitCounter=782} }}} The fix was to modify Prepare2Phase.call() to await the Future for the gather task before making this test. - Journal: log @ error if we force a barrier break. We still have a problem here where the GATHER is not robust to a service failure. - SnapshotManager.isReadyToSnapshot(): modified to use IBufferStrategy.getExtent() in order to avoid a lock in the RWStore that could be contended by a commit. - HAJournalServer: improved logging for runState changes. - HAJournalServer: SeekConsensus now calls processEvents() after getActor().pipelineAdd() to ensure that the HASendService and/or HAReceiveService are setup. There are issues where activities are not cancelled / interrupted if their invariants change. Examples include: - RESYNC: If the quorum breaks, then RESYNC should terminate. Likewise, if the HAReceiveService is not setup, RESYNC should terminate. - SNAPSHOT: Should terminate if quorum breaks or not HAReady (this is implemented through hooking setQuorumToken() in HAJournal) - sendHALogForWriteSet(): Should terminate if receiver leaves pipeline, if sender is not quorum leader, etc. - sendStore(): Should terminate if receiver leaves pipeline, if sender is not quorum leader, etc. There may be other examples of computations that need to be terminated on an abort or a transition from HAReady to NotReady. See #695.
Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumService.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java branches/READ_CACHE2/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumService.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumService.java 2013-08-09 23:15:33 UTC (rev 7281) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumService.java 2013-08-12 17:13:31 UTC (rev 7282) @@ -124,24 +124,6 @@ final IRootBlockView rootBlock1); /** - * Callback method. - * - * @param token - * The token on which the service joined a met quorum. - * @param commitCounter - * The commitCounter for the local service. - * @param isLeader - * <code>true</code> iff the local service is the quorum leader. - */ - /* - * I added this in but wound up not needed it. Do not use without good - * justification. - */ - @Deprecated - void didMeet(final long token, final long commitCounter, - final boolean isLeader); - - /** * Enter an error state. The error state should take whatever corrective * actions are necessary in order to prepare the service for continued * operations. Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-09 23:15:33 UTC (rev 7281) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-12 17:13:31 UTC (rev 7282) @@ -3626,6 +3626,7 @@ } + @Override public void force(final boolean metadata) { assertOpen(); @@ -3634,12 +3635,14 @@ } + @Override public long size() { return _bufferStrategy.size(); } + @Override public ByteBuffer read(final long addr) { assertOpen(); @@ -3649,6 +3652,7 @@ } + @Override public long write(final ByteBuffer data) { assertCanWrite(); @@ -3657,6 +3661,7 @@ } + @Override public long write(final ByteBuffer data, final IAllocationContext context) { assertCanWrite(); @@ -3704,6 +3709,7 @@ // Note: NOP for WORM. Used by RW for eventual recycle protocol. + @Override public void delete(final long addr) { assertCanWrite(); @@ -3712,6 +3718,7 @@ } + @Override public void delete(final long addr, final IAllocationContext context) { assertCanWrite(); @@ -3728,6 +3735,7 @@ } + @Override public void detachContext(final IAllocationContext context) { assertCanWrite(); @@ -3740,6 +3748,7 @@ } + @Override public void abortContext(final IAllocationContext context) { assertCanWrite(); @@ -3752,6 +3761,7 @@ } + @Override public void registerContext(final IAllocationContext context) { assertCanWrite(); @@ -3764,6 +3774,7 @@ } + @Override final public long getRootAddr(final int index) { final ReadLock lock = _fieldReadWriteLock.readLock(); @@ -4394,6 +4405,7 @@ * also write on this index. I have tried some different approaches to * handling this. */ + @Override public ICommitRecord getCommitRecord(final long commitTime) { if (isHistoryGone(commitTime)) @@ -4487,6 +4499,7 @@ * than having an inner delegate for the mutable view. 
The local/remote * issue is more complex. */ + @Override public IIndex getIndex(final String name, final long commitTime) { return (BTree) getIndexLocal(name, commitTime); @@ -4514,7 +4527,7 @@ * cache for access to historical index views on the Journal by name * and commitTime. </a> */ -// @Override TODO Add @Override once change in IBTreeManager merged into READ_CACHE branch. + @Override final public ICheckpointProtocol getIndexLocal(final String name, final long commitTime) { @@ -4922,6 +4935,7 @@ * Note: You MUST {@link #commit()} before the registered index will be * either restart-safe or visible to new transactions. */ + @Override final public void registerIndex(final IndexMetadata metadata) { if (metadata == null) @@ -4968,6 +4982,7 @@ * * @deprecated by {@link #register(String, IndexMetadata)} */ + @Override final public BTree registerIndex(final String name, final IndexMetadata metadata) { validateIndexMetadata(name, metadata); @@ -4992,7 +5007,7 @@ * * @see Checkpoint#create(IRawStore, IndexMetadata) */ -// @Override TODO Add @Override once change in IBTreeManager merged into READ_CACHE branch. + @Override public ICheckpointProtocol register(final String name, final IndexMetadata metadata) { @@ -5004,6 +5019,7 @@ } + @Override final public BTree registerIndex(final String name, final BTree ndx) { _register(name, ndx); @@ -5063,6 +5079,7 @@ * commits and will not be visible to new transactions. Storage will be * reclaimed IFF the backing store support that functionality. */ + @Override public void dropIndex(final String name) { final ICheckpointProtocol ndx = getUnisolatedIndex(name); @@ -5112,6 +5129,7 @@ } + @Override public Iterator<String> indexNameScan(final String prefix, final long timestamp) { @@ -5189,32 +5207,32 @@ * * @see #getLiveView(String, long) */ + @Override final public BTree getIndex(final String name) { return (BTree) getUnisolatedIndex(name); } - /** - * Return the mutable view of the named index (aka the "live" or - * {@link ITx#UNISOLATED} index). This object is NOT thread-safe. You MUST - * NOT write on this index unless you KNOW that you are the only writer. See - * {@link ConcurrencyManager}, which handles exclusive locks for - * {@link ITx#UNISOLATED} indices. - * - * @return The mutable view of the index. - * - * @see #getUnisolatedIndex(String) - * - * @deprecated Use {@link #getUnisolatedIndex(String)} - */ -// TODO Remove method once change in IBTreeManager merged into READ_CACHE branch. - @Deprecated - final public HTree getHTree(final String name) { - - return (HTree) getUnisolatedIndex(name); - - } +// /** +// * Return the mutable view of the named index (aka the "live" or +// * {@link ITx#UNISOLATED} index). This object is NOT thread-safe. You MUST +// * NOT write on this index unless you KNOW that you are the only writer. See +// * {@link ConcurrencyManager}, which handles exclusive locks for +// * {@link ITx#UNISOLATED} indices. +// * +// * @return The mutable view of the index. +// * +// * @see #getUnisolatedIndex(String) +// * +// * @deprecated Use {@link #getUnisolatedIndex(String)} +// */ +// @Deprecated +// final public HTree getHTree(final String name) { +// +// return (HTree) getUnisolatedIndex(name); +// +// } // /** // * Return the mutable view of the named index (aka the "live" or @@ -5239,7 +5257,7 @@ * * @return The mutable view of the persistence capable data structure. */ -// @Override TODO Add @Override once change in IBTreeManager merged into READ_CACHE branch. 
+ @Override final public ICheckpointProtocol getUnisolatedIndex(final String name) { final ReadLock lock = _fieldReadWriteLock.readLock(); @@ -5279,22 +5297,27 @@ * IAddressManager */ + @Override final public long getOffset(long addr) { return _bufferStrategy.getOffset(addr); } + @Override final public long getPhysicalAddress(long addr) { return _bufferStrategy.getAddressManager().getPhysicalAddress(addr); } + @Override final public int getByteCount(long addr) { return _bufferStrategy.getByteCount(addr); } + @Override final public long toAddr(int nbytes, long offset) { return _bufferStrategy.toAddr(nbytes, offset); } + @Override final public String toString(long addr) { return _bufferStrategy.toString(addr); } @@ -5355,160 +5378,8 @@ final QuorumTokenTransitions transitionState = new QuorumTokenTransitions( quorumToken, newValue, localService, haReadyToken); -// /* -// * The token is [volatile]. Save it's state on entry. Figure out if this -// * is a quorum meet or a quorum break. -// */ -// { -// /* -// * TODO: remove this code once the refactoring with QuorumTokenTransitions is stable, right -// * now it is used to sanity check the new code. -// */ -// final long oldValue = quorumToken; -// final long oldReady = haReadyToken; -// final HAStatusEnum oldStatus = haStatus; -// -// if (haLog.isInfoEnabled()) -// haLog.info("oldValue=" + oldValue + ", newToken=" + newValue -// + ", oldReady=" + oldReady); -// -// /* -// * Note that previously the noTokenChange condition was a short -// * circuit exit. -// * -// * This has been removed so we must account for this condition to -// * determine correct transition -// */ -// final boolean noTokenChange = oldValue == newValue -// && oldValue == oldReady; -// // if (oldValue == newValue && oldValue == oldReady) { -// // log.warn("NO TOKEN CHANGE"); -// // // No change. -// // return; -// // -// // } -// -// final boolean didBreak; -// final boolean didMeet; -// final boolean didJoinMetQuorum; -// final boolean didLeaveMetQuorum; -// final boolean isJoined = localService != null -// && localService.isJoinedMember(newValue); -// -// final boolean wasJoined = oldReady != Quorum.NO_QUORUM; -// -// /** -// * Adding set of initial conditions to account for noTokenChange -// * -// * TODO: should there be more than one initial condition? -// */ -// if (noTokenChange && isJoined) { -// didBreak = false; // quorum break. -// didMeet = false; -// didJoinMetQuorum = false; // true; -// didLeaveMetQuorum = false; // haReadyToken != Quorum.NO_QUORUM; -// // // if service was joined with met -// // quorum, then it just left the met -// // quorum. -// } else if (newValue == Quorum.NO_QUORUM -// && oldValue != Quorum.NO_QUORUM) { -// -// /* -// * Quorum break. -// * -// * Immediately invalidate the token. Do not wait for a lock. -// */ -// -// this.quorumToken = newValue; -// -// didBreak = true; // quorum break. -// didMeet = false; -// didJoinMetQuorum = false; -// didLeaveMetQuorum = wasJoined; // if service was joined with met -// // quorum, then it just left the -// // met quorum. -// -// } else if (newValue != Quorum.NO_QUORUM -// && oldValue == Quorum.NO_QUORUM) { -// -// /* -// * Quorum meet. -// * -// * We must wait for the lock to update the token. -// */ -// -// didBreak = false; -// didMeet = true; // quorum meet. -// didJoinMetQuorum = false; -// didLeaveMetQuorum = false; -// -// } else if (newValue != Quorum.NO_QUORUM // quorum exists -// && oldReady == Quorum.NO_QUORUM // service was not joined -// // with met quorum. 
-// && isJoined // service is now joined with met quorum. -// ) { -// -// /* -// * This service is joining a quorum that is already met. -// */ -// -// didBreak = false; -// didMeet = false; -// didJoinMetQuorum = true; // service joined with met quorum. -// didLeaveMetQuorum = false; -// -// } else if (newValue != Quorum.NO_QUORUM // quorum exists -// && wasJoined // service was joined with met quorum -// && !isJoined // service is no longer joined with met quorum. -// ) { -// -// /* -// * This service is leaving a quorum that is already met (but -// * this is not a quorum break since the new token is not -// * NO_QUORUM). -// */ -// -// didBreak = false; -// didMeet = false; -// didJoinMetQuorum = false; -// didLeaveMetQuorum = true; // service left met quorum. quorum -// // still met. -// -// } else { -// -// // /* -// // * No change in state. -// // */ -// // -// // log.warn("No change"// -// // + ": qorumToken(" + oldValue + " => " + newValue + ")"// -// // + ", haReadyToken(" + haReadyToken + ")"// -// // ); -// -// didBreak = false; -// didMeet = false; -// didJoinMetQuorum = false; -// didLeaveMetQuorum = false; -// -// return; -// -// } -// -// log.warn("didBreak: " + didBreak -// + ", didMeet: " + didMeet -// + ", didJoinMetQuorum: " + didJoinMetQuorum -// + ", didLeaveMetQuorum: " + didLeaveMetQuorum -// + ", isJoined: " + isJoined -// + ", wasJoined: " + wasJoined -// ); -// -// assert didBreak == transitionState.didBreak; -// assert didMeet == transitionState.didMeet; -// assert didJoinMetQuorum == transitionState.didJoinMetQuorum; -// assert didLeaveMetQuorum == transitionState.didLeaveMetQuorum; -// assert isJoined == transitionState.isJoined; -// assert wasJoined == transitionState.wasJoined; -// } + if (haLog.isInfoEnabled()) + haLog.info(transitionState.toString()); if (transitionState.didBreak) { /* @@ -5533,54 +5404,44 @@ /* * Both a meet and a break require an exclusive write lock. + * + * TODO: Is this lock synchronization a problem? With token update + * delayed on a lock could a second thread process a new token based on + * incorrect state since the first thread has not updated the token? For + * example: NO_TOKEN -> valid token -> NO_TOKEN */ - final boolean isLeader; - final boolean isFollower; - final long localCommitCounter; - - /* - * TODO: Is this lock synchronization a problem? With token update delayed on a lock could a second thread - * process a new token based on incorrect state since the first thread has not updated the token? - * For example: NO_TOKEN -> valid token -> NO_TOKEN - */ final WriteLock lock = _fieldReadWriteLock.writeLock(); lock.lock(); try { - - /* - * The following condition tests are slightly confusing, it is not clear that they - * represent all real states. - * - * Essentially - * didBreak - abort - * didLeaveMetQuorum - abort - * didJoinMetQuorum - follower gets rootBlocks - * didMeet - just sets token - * - * Are there other valid states? - * - * If a void call is made - no token change, no leave or join - then this should - * result in an assertion error. - * - * FIXME: It is possible that these are being thrown and the quorum is then re-forming - * but we need to be able to trap these errors (if they occur) to understand the circumstances. - * It may well be that this is the source of the stochastic failures we see. - */ + /** + * The following condition tests are slightly confusing, it is not + * clear that they represent all real states. 
+ * + * <pre> + * Essentially: + * didBreak - abort + * didLeaveMetQuorum - abort + * didJoinMetQuorum - follower gets rootBlocks + * didMeet - just sets token + * </pre> + * + * In addition, there is a case where a service is joined as + * perceived by the ZKQuorum but not yet HAReady. If a 2-phase + * commit is initiated, then the service will enter an error state + * (because it is not yet HAReady). This net-zero change case is + * explicitly handled below. + */ + if (transitionState.didLeaveMetQuorum) { /* * The service was joined with a met quorum. - * - * TODO Is it okay to set this token prior to the abort methods? */ quorumToken = newValue; // volatile write. - localCommitCounter = -1; - isLeader = isFollower = false; - /* * We also need to discard any active read/write tx since there * is no longer a quorum and a read/write tx was running on the @@ -5625,26 +5486,32 @@ */ quorumToken = Quorum.NO_QUORUM; // volatile write. - - localCommitCounter = -1; - isLeader = isFollower = false; - } else if (transitionState.didMeet || transitionState.didJoinMetQuorum) { - - /** - * TODO: This state is a bit confused, is it possible that both conditions - * are true? Could we already be joined at the time we learn that the quorum is - * met? If not then it might make more sense to test separately - */ + haReadyToken = Quorum.NO_QUORUM; // volatile write. + + haStatus = HAStatusEnum.NotReady; // volatile write. + + haReadyCondition.signalAll(); // signal ALL. + + } else if (transitionState.didMeet + || transitionState.didJoinMetQuorum) { + /** + * Either a quorum meet (didMeet:=true) or the service is + * joining a quorum that is already met (didJoinMetQuorum). + */ + final long tmp; - + quorumToken = newValue; boolean installedRBs = false; - localCommitCounter = _rootBlock.getCommitCounter(); - + final long localCommitCounter = _rootBlock.getCommitCounter(); + + final boolean isLeader; + final boolean isFollower; + if (localService.isFollower(newValue)) { isLeader = false; @@ -5770,7 +5637,36 @@ } else { - throw new AssertionError("VOID setToken");// FIXME HA-ABC + /* + * Did not (leave|break|meet|join). + */ + + if (haReadyToken != Quorum.NO_QUORUM) { + + /* + * We should not be here if this service is HAReady. + */ + throw new AssertionError("VOID setToken"); + + } + + /* + * We are not joined. No change in token or HAReadyToken. + * + * Note: This can occur (for example) if we are not yet joined + * and an error occurs during our attempt to join with a met + * quorum. One observed example is when this service is in the + * joined[] for zookeeper and therefore is messaged as part of + * the GATHER or PREPARE protocols for a 2-phase commit, but the + * service is not yet HAReady and therefore enters an error + * state rather than completing the 2-phase commit protocol + * successfully. When setQuorumToken() is called from the error + * handling task, the haReadyToken is already cleared. Unless + * the quorum also breaks, the quorum token will be unchanged. + * Hence we did not (leave|break|meet|join). + */ + + // Fall through. 
} @@ -5780,15 +5676,6 @@ } - if (haLog.isInfoEnabled()) - haLog.info(transitionState.toString()); - - if (isLeader || isFollower) { - - localService.didMeet(newValue, localCommitCounter, isLeader); - - } - } private final Condition haReadyCondition = _fieldReadWriteLock.writeLock().newCondition(); private volatile long haReadyToken = Quorum.NO_QUORUM; @@ -5797,20 +5684,20 @@ */ private volatile HAStatusEnum haStatus = HAStatusEnum.NotReady; - /** - * Await the service being ready to partitipate in an HA quorum. The - * preconditions include: - * <ol> - * <li>receiving notice of the quorum token via - * {@link #setQuorumToken(long)}</li> - * <li>The service is joined with the met quorum for that token</li> - * <li>If the service is a follower and it's local root blocks were at - * <code>commitCounter:=0</code>, then the root blocks from the leader have - * been installed on the follower.</li> - * <ol> - * - * @return the quorum token for which the service became HA ready. - */ +// /** +// * Await the service being ready to partitipate in an HA quorum. The +// * preconditions include: +// * <ol> +// * <li>receiving notice of the quorum token via +// * {@link #setQuorumToken(long)}</li> +// * <li>The service is joined with the met quorum for that token</li> +// * <li>If the service is a follower and it's local root blocks were at +// * <code>commitCounter:=0</code>, then the root blocks from the leader have +// * been installed on the follower.</li> +// * <ol> +// * +// * @return the quorum token for which the service became HA ready. +// */ // final public long awaitHAReady() throws InterruptedException, // AsynchronousQuorumCloseException, QuorumException { // final WriteLock lock = _fieldReadWriteLock.writeLock(); @@ -6808,7 +6695,25 @@ .getConsensusReleaseTime(); { - + + if (oldFuture != null) { + + /* + * If we ran the GATHER task, then we must await the + * outcome of the GATHER on this service before we + * can verify that the local consensus release time + * is consistent with the GATHER. + * + * Note: If the oldFuture is null, then the service + * just joined and was explicitly handed the + * consensus release time and hence should be + * consistent here anyway. + */ + + oldFuture.get(); + + } + final long localReleaseTime = getLocalTransactionManager() .getTransactionService().getReleaseTime(); @@ -6824,7 +6729,8 @@ + ", expectedReleaseTime=" + expectedReleaseTime + ", consensusReleaseTime=" - + consensusReleaseTime); + + consensusReleaseTime + + ", serviceId=" + getServiceId()); } Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-09 23:15:33 UTC (rev 7281) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-12 17:13:31 UTC (rev 7282) @@ -833,6 +833,7 @@ * action ourselves. E.g., in the thread that calls * barrier.reset()]. 
*/ + log.error("Forcing barrier break"); barrier.reset(); } Modified: branches/READ_CACHE2/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java =================================================================== --- branches/READ_CACHE2/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java 2013-08-09 23:15:33 UTC (rev 7281) +++ branches/READ_CACHE2/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java 2013-08-12 17:13:31 UTC (rev 7282) @@ -471,11 +471,11 @@ throw new UnsupportedOperationException(); } - @Override - public void didMeet(final long token, final long commitCounter, - final boolean isLeader) { - throw new UnsupportedOperationException(); - } +// @Override +// public void didMeet(final long token, final long commitCounter, +// final boolean isLeader) { +// throw new UnsupportedOperationException(); +// } @Override public File getServiceDir() { Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-09 23:15:33 UTC (rev 7281) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-12 17:13:31 UTC (rev 7282) @@ -1055,8 +1055,19 @@ } - haLog.warn("runState=" + runState + ", oldRunState=" + oldRunState - + ", serviceName=" + server.getServiceName(), new StackInfoReport()); + // Note: *should* be non-null. Just paranoid. + final IRootBlockView rb = journal.getRootBlockView(); + + final String commitCounterStr = (rb == null) ? "N/A" : Long + .toString(rb.getCommitCounter()); + + haLog.warn("runState=" + runState // + + ", oldRunState=" + oldRunState // + + ", quorumToken=" + journal.getQuorumToken()// + + ", haStatus=" + journal.getHAStatus()// + + ", commitCounter=" + commitCounterStr// + + ", serviceName=" + server.getServiceName(),// + new StackInfoReport()); } @@ -1599,50 +1610,49 @@ } - /** - * {@inheritDoc} - * <p> - * If there is a fully met quorum, then we can purge all HA logs - * <em>EXCEPT</em> the current one. - */ - @Override - public void serviceJoin() { +// /** +// * {@inheritDoc} +// * <p> +// * If there is a fully met quorum, then we can purge all HA logs +// * <em>EXCEPT</em> the current one. +// */ +// @Override +// public void serviceJoin() { +// +// super.serviceJoin(); +// +//// // Submit task to handle this event. +//// server.singleThreadExecutor.execute(new MonitoredFutureTask<Void>( +//// new ServiceJoinTask())); +// } +// +// /** +// * Purge HALog files on a fully met quorum. +// */ +// private class ServiceJoinTask implements Callable<Void> { +// public Void call() throws Exception { +// +// final long token = getQuorum().token(); +// +// if (getQuorum().isQuorumFullyMet(token)) { +// /* +// * TODO Even though the quorum is fully met, we should wait +// * until we have a positive indication from the leader that +// * it is "ha ready" before purging the HA logs and aging put +// * snapshots. The leader might need to explicitly schedule +// * this operation against the joined services and the +// * services should then verify that the quorum is fully met +// * before they actually age out the HALogs and snapshots. +// */ +// purgeHALogs(token); +// +// } +// +// return null; +// +// } +// } - super.serviceJoin(); - - // FIXME serviceJoin() - restore event handler. -// // Submit task to handle this event. 
-// server.singleThreadExecutor.execute(new MonitoredFutureTask<Void>( -// new ServiceJoinTask())); - } - - /** - * Purge HALog files on a fully met quorum. - */ - private class ServiceJoinTask implements Callable<Void> { - public Void call() throws Exception { - - final long token = getQuorum().token(); - - if (getQuorum().isQuorumFullyMet(token)) { - /* - * TODO Even though the quorum is fully met, we should wait - * until we have a positive indication from the leader that - * it is "ha ready" before purging the HA logs and aging put - * snapshots. The leader might need to explicitly schedule - * this operation against the joined services and the - * services should then verify that the quorum is fully met - * before they actually age out the HALogs and snapshots. - */ - purgeHALogs(token); - - } - - return null; - - } - } - @Override public void memberRemove() { @@ -1840,6 +1850,19 @@ // ensure in pipeline. getActor().pipelineAdd(); + /** + * Make sure that the pipelineAdd() is handled before + * continuing. Handling this event will setup the + * HAReceiveService. This is necessary since the event is no + * longer synchronously handled. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/695"> + * HAJournalServer reports "follower" but is in + * SeekConsensus and is not participating in commits</a> + */ + processEvents(); + { final long token = getQuorum().token(); @@ -3887,11 +3910,11 @@ } - @Override - public void didMeet(final long token, final long commitCounter, - final boolean isLeader) { - // NOP - } +// @Override +// public void didMeet(final long token, final long commitCounter, +// final boolean isLeader) { +// // NOP +// } @Override public File getServiceDir() { Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2013-08-09 23:15:33 UTC (rev 7281) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2013-08-12 17:13:31 UTC (rev 7282) @@ -1077,7 +1077,9 @@ * snapshot as a percentage of the size of the journal. */ - final long journalSize = journal.size(); + // Note: This is the file size on the disk (or in memory). No locks + // should be required. + final long journalSize = journal.getBufferStrategy().getExtent(); // size(HALogs)/size(journal) as percentage. final int actualPercentLogSize = (int) (100 * (((double) haLogBytesOnDisk) / ((double) journalSize)));
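The transition handling in the revised setQuorumToken() above condenses to two actionable pairs of cases plus the net-zero fall-through that this revision makes legal for a service that is not yet HAReady. The sketch below shows only that decision structure and is not the bigdata code; the flag names mirror QuorumTokenTransitions in the diff, while onTokenChange() and isHAReady are illustrative assumptions.

{{{
// Sketch of the decision structure only; not the bigdata implementation.
final class TokenTransitionSketch {

    void onTokenChange(final boolean didBreak, final boolean didLeaveMetQuorum,
            final boolean didMeet, final boolean didJoinMetQuorum,
            final boolean isHAReady) {

        if (didBreak || didLeaveMetQuorum) {
            // Abort path: discard any active read/write tx, clear the
            // haReadyToken and haStatus, and signal waiters.
        } else if (didMeet || didJoinMetQuorum) {
            // Meet path: publish the new token; a follower whose local
            // commitCounter is 0 also installs the leader's root blocks.
        } else if (isHAReady) {
            // An HAReady service must have matched one of the cases above.
            throw new AssertionError("VOID setToken");
        } else {
            // Net-zero change, e.g. a service joined in zookeeper but not
            // yet HAReady entered an error state during GATHER/PREPARE: its
            // haReadyToken is already cleared and, unless the quorum also
            // breaks, the quorum token is unchanged. Fall through.
        }
    }
}
}}}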
From: <jer...@us...> - 2013-08-09 23:15:45
Revision: 7281
http://bigdata.svn.sourceforge.net/bigdata/?rev=7281&view=rev
Author: jeremy_carroll
Date: 2013-08-09 23:15:33 +0000 (Fri, 09 Aug 2013)

Log Message:
-----------
Fixes for trac 704 (JSON results for SPARQL ASK queries) and trac 711 (HTTP POST with the query as the request body).

Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/MiniMime.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractIndexManagerTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ExampleProtocolTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAskJsonTrac704.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestPostNotURLEncoded.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestProtocolAll.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestRelease123Protocol.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -1480,6 +1480,11 @@ this.resp = resp; + /** Content Type header is required: + http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1 + */ + resp.setContentType("text/html; charset="+charset.name()); + this.os = os; this.charset = charset;
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -110,8 +110,12 @@ /** * RDF/XML. */ - static public final transient String MIME_RDF_XML = "application/rdf+xml"; + static public final transient String MIME_RDF_XML = "application/rdf+xml"; + public static final String MIME_SPARQL_QUERY = "application/sparql-query"; + + public static final String MIME_SPARQL_UPDATE = "application/sparql-update"; + /** * */ Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -28,6 +28,8 @@ package com.bigdata.rdf.sail.webapp; +import java.io.IOException; +import java.io.OutputStream; import java.util.Arrays; import java.util.LinkedList; import java.util.List; @@ -35,6 +37,9 @@ import org.apache.log4j.Logger; import org.openrdf.query.resultio.BooleanQueryResultFormat; +import org.openrdf.query.resultio.BooleanQueryResultWriter; +import org.openrdf.query.resultio.BooleanQueryResultWriterFactory; +import org.openrdf.query.resultio.BooleanQueryResultWriterRegistry; import org.openrdf.query.resultio.TupleQueryResultFormat; import org.openrdf.rio.RDFFormat; @@ -49,6 +54,7 @@ * @version $Id$ */ public class ConnegUtil { + private static Logger log = Logger.getLogger(ConnegUtil.class); @@ -57,6 +63,40 @@ pattern = Pattern.compile("\\s*,\\s*"); } + + static { + // Work-around for sesame not handling ask and json (see trac 704 and 714) + + if (BooleanQueryResultFormat.forMIMEType(BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON)!=null) { + // This should fire once trac 714 is fixed, and we have upgraded, at this point the whole static block should be deleted. 
+ log.warn("Workaround for sesame 2.6 BooleanQueryResultFormat defect no longer needed", new RuntimeException("location of issue")); + } else { + final BooleanQueryResultFormat askJsonFormat = BooleanQueryResultFormat.register("SPARQL/JSON",BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON,"srj"); + BooleanQueryResultWriterRegistry.getInstance().add(new BooleanQueryResultWriterFactory(){ + + @Override + public BooleanQueryResultFormat getBooleanQueryResultFormat() { + return askJsonFormat; + } + + @Override + public BooleanQueryResultWriter getWriter(final OutputStream out) { + return new BooleanQueryResultWriter(){ + + @Override + public BooleanQueryResultFormat getBooleanQueryResultFormat() { + return askJsonFormat; + } + + @Override + public void write(boolean arg0) throws IOException { + final String answer = "{ \"head\":{ } , \"boolean\": " + Boolean.toString(arg0) + " }"; + out.write(answer.getBytes("utf-8")); + }}; + }}); + } + } + private final ConnegScore<?>[] scores; /** @@ -110,7 +150,7 @@ // BooleanQueryResultFormat { - final BooleanQueryResultFormat booleanFormat = BooleanQueryResultFormat + BooleanQueryResultFormat booleanFormat = BooleanQueryResultFormat .forMIMEType(t.getMimeType()); if (booleanFormat != null) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -26,6 +26,7 @@ import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.PrintWriter; +import java.io.Reader; import java.io.StringWriter; import java.io.Writer; import java.util.Iterator; @@ -145,12 +146,17 @@ protected void doPost(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { + if (req.getParameter(ATTR_UPDATE) != null) { // SPARQL 1.1 UPDATE. doUpdate(req, resp); - } else if (req.getParameter(ATTR_UUID) != null) { + } else if (RESTServlet.hasMimeType(req, MIME_SPARQL_UPDATE)) { + // SPARQL 1.1 UPDATE, see trac 711 for bug report motivating this case + doUpdate(req, resp); + + } else if (req.getParameter(ATTR_UUID) != null) { // UUID with caching defeated. doUUID(req, resp); @@ -298,7 +304,7 @@ * Handles SPARQL UPDATE. * * <pre> - * update (required) + * update OR update in body (see trac 711) * using-graph-uri (0 or more) * using-named-graph-uri (0 or more) * </pre> @@ -319,8 +325,8 @@ final long timestamp = ITx.UNISOLATED;//getTimestamp(req); - // The SPARQL query. - final String updateStr = req.getParameter("update"); + // The SPARQL update + final String updateStr = getUpdateString(req); if (updateStr == null) { @@ -419,15 +425,7 @@ final long timestamp = getTimestamp(req); - /* - * The SPARQL query. - * - * Note: This can be attached as a request attribute. That supports a - * linked data GET by turning it into a SPARQL DESCRIBE query. - */ - final String queryStr = req.getParameter(ATTR_QUERY) != null ? req - .getParameter(ATTR_QUERY) : (String) req - .getAttribute(ATTR_QUERY); + final String queryStr = getQueryString(req); if (queryStr == null) { @@ -652,7 +650,44 @@ } } + + /** + * The SPARQL query. + * + * Note: This can be attached as a request attribute. That supports a + * linked data GET by turning it into a SPARQL DESCRIBE query. 
+ * @throws IOException + */ + private String getQueryString(final HttpServletRequest req) throws IOException { + if (RESTServlet.hasMimeType(req, MIME_SPARQL_QUERY)) { + // return the body of the POST, see trac 711 + return readFully( req.getReader() ); + } + return req.getParameter(ATTR_QUERY) != null ? req + .getParameter(ATTR_QUERY) : (String) req + .getAttribute(ATTR_QUERY); + } + + private String getUpdateString(final HttpServletRequest req) throws IOException { + if (RESTServlet.hasMimeType(req, MIME_SPARQL_UPDATE)) { + // return the body of the POST, see trac 711 + return readFully( req.getReader() ); + } + return req.getParameter(ATTR_UPDATE); + } + + static String readFully(Reader reader) throws IOException { + char[] arr = new char[8*1024]; // 8K at a time + StringBuffer buf = new StringBuffer(); + int numChars; + while ((numChars = reader.read(arr, 0, arr.length)) > 0) { + buf.append(arr, 0, numChars); + } + + return buf.toString(); + } + /** * Sends an explanation for the query rather than the query results. The * query is still run, but the query statistics are reported instead of the Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -33,6 +33,8 @@ import org.openrdf.model.URI; import org.openrdf.model.impl.URIImpl; +import com.bigdata.rdf.sail.webapp.client.MiniMime; + /** * Default dispatch pattern for a core REST API. * @@ -189,12 +191,15 @@ @Override protected void doPost(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - - if (req.getParameter(QueryServlet.ATTR_QUERY) != null + + if (req.getParameter(QueryServlet.ATTR_QUERY) != null || req.getParameter(QueryServlet.ATTR_UPDATE) != null || req.getParameter(QueryServlet.ATTR_UUID) != null || req.getParameter(QueryServlet.ATTR_ESTCARD) != null || req.getParameter(QueryServlet.ATTR_CONTEXTS) != null + // the two cases below were added to fix bug trac 711 + || hasMimeType(req, BigdataRDFServlet.MIME_SPARQL_UPDATE) + || hasMimeType(req, BigdataRDFServlet.MIME_SPARQL_QUERY) ) { // SPARQL QUERY -or- SPARQL UPDATE via POST @@ -231,6 +236,11 @@ } + static boolean hasMimeType(final HttpServletRequest req, String mimeType) { + String contentType = req.getContentType(); + return contentType != null && mimeType.equals(new MiniMime(contentType).getMimeType()); + } + /** * A PUT request always delegates to the {@link UpdateServlet}. * <p> Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/MiniMime.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/MiniMime.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/MiniMime.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -33,6 +33,10 @@ * Extract and return the quality score for the mime type (defaults to * <code>1.0</code>). 
* + * Note: <a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1">the grammar</a> permits + * whitespace fairly generally, but the parser in this class does not cope with this correctly, + * but largely assumes that such whitespace is omitted. + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ * @@ -50,7 +54,7 @@ public MiniMime(final String s) { final String[] b = s.split(";"); - mimeType = b[0]; + mimeType = b[0].trim(); float q = 1f; params = new String[b.length][]; for (int i = 1; i < b.length; i++) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractIndexManagerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractIndexManagerTestCase.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractIndexManagerTestCase.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -56,7 +56,7 @@ /** * Invoked from {@link TestCase#setUp()} for each test in the suite. */ - public void setUp(ProxyTestCase testCase) throws Exception { + public void setUp(ProxyTestCase<S> testCase) throws Exception { if(log.isInfoEnabled()) log.info("\n\n================:BEGIN:" + testCase.getName() @@ -67,7 +67,7 @@ /** * Invoked from {@link TestCase#tearDown()} for each test in the suite. */ - public void tearDown(ProxyTestCase testCase) throws Exception { + public void tearDown(ProxyTestCase<S> testCase) throws Exception { if(log.isInfoEnabled()) log.info("\n================:END:" + testCase.getName() @@ -98,6 +98,9 @@ abstract protected S getIndexManager(); + + + // /** // * Open/create an {@link IIndexManager} using the given properties. // */ Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -0,0 +1,327 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +package com.bigdata.rdf.sail.webapp; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import javax.servlet.http.HttpServlet; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpResponse; +import org.apache.http.client.HttpClient; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.DefaultHttpClient; + +import com.bigdata.journal.IIndexManager; +import com.bigdata.rdf.sail.webapp.client.ConnectOptions; +import com.bigdata.rdf.sail.webapp.client.DefaultClientConnectionManagerFactory; + +/** + * This class supports making requests to the server with fairly low level control. + * Each operation is set up by calls to the protected methods such as + * {@link #setMethodisPost(String, String)}, {@link #setAllow400s()}, + * and then to call {@link #serviceRequest(String...)} to actually + * process the request. + * This process may be repeated multiple times. + * After each call to {@link #serviceRequest(String...)} + * the options are reset to the defaults. + * @author jeremycarroll + * + */ +public abstract class AbstractProtocolTest extends AbstractTestNanoSparqlClient<IIndexManager> { + + protected interface RequestFactory { + HttpUriRequest createRequest(String ... params); + }; + + protected static final String SELECT = "SELECT (1 as ?one){}"; + protected static final String ASK = "ASK WHERE {}"; + protected static final long PAUSE_BEFORE_CLOSE_TIME = 100; + private static int updateCounter = 0; + private static String update() { + return "INSERT { <http://example.org/a> <http://example.org/a> <http://example.org/" + updateCounter++ + "> } WHERE {}"; + } + + private static String askIfUpdated() { + return "ASK { <http://example.org/a> <http://example.org/a> <http://example.org/" + updateCounter + "> }"; + } + + /** + * A SPARQL ASK Query that returns true iff {@Link #update} has successfully run + */ + private final String askIfUpdated = askIfUpdated(); + /** + * A SPARQL Update that adds a triple + */ + final String update = update(); + + final HttpServlet servlet; + HttpClient client; + private String responseContentType = null; + private String accept = null; + private boolean permit400s = false; + + private final RequestFactory GET = new RequestFactory(){ + @Override + public HttpUriRequest createRequest(String... 
params) { + final StringBuffer url = new StringBuffer(m_serviceURL); + url.append("/sparql"); + char sep = '?'; + for (int i=0;i<params.length;i+=2) { + url.append(sep); + url.append(params[i]); + url.append('='); + try { + url.append(URLEncoder.encode(params[i+1], "UTF-8")); + } catch (final UnsupportedEncodingException e) { + // JVM must support UTF-8 + throw new Error(e); + } + sep='&'; + } + return new HttpGet(url.toString()); + } + }; + + private RequestFactory requestFactory = GET; + @Override + public void setUp() throws Exception { + super.setUp(); + client = new DefaultHttpClient(DefaultClientConnectionManagerFactory.getInstance().newInstance()); + resetDefaultOptions(); + } + /** + * This method is called automatically after each call to {@link #serviceRequest(String...)} + * so probably is unnecessary. + */ + protected void resetDefaultOptions() { + accept = null; + requestFactory = GET; + accept = null; + permit400s = false; + } + + /** + * + * @return The content type of the last response, null if none (e.g. a 204?) + */ + protected String getResponseContentType() { + return responseContentType; + } + + /** + * Sets the accept header, default is "*" + * @param mimetype + */ + protected void setAccept(String mimetype) { + accept = mimetype; + } + + static private Pattern charset = Pattern.compile("[; ]charset *= *\"?([^ ;\"]*)([ \";]|$)"); + + /** + * Sanity check the {@link #charset} pattern + * @param argv + */ + public static void main(String argv[]) { + for (final String t:new String[]{ + "text/html ; charset=iso-8856-1", + "text/html ; charset=iso-8856-1; foo = bar", + "text/html ;charset=iso-8856-1; foo = bar", + "text/html ; charset= \"iso-8856-1\"", + "text/html ; charset=iso-8856-1; foo = bar", + "text/html ; charset = iso-8856-1; foo = bar", + "text/html ; foo = bar", + "text/html", + + }) { + final Matcher m = charset.matcher(t); + System.err.println(t+ " ====> "+(m.find()?m.group(1):"")); + } + } + + /** + * This is the main entry point for subclasses. + * This method sends a request to the server, as set up + * by setABC methods, and returns the string send back to the client. + * @param paramValues This is an even number of param [=] value pairs. Multiple values for the same param are supported. + * These are passed to the server either as URL query params, or as URL encoded values in the body if the method + * {@link #setMethodisPostUrlEncodedData()} has been called. + * @return the data returned by the server. + * @throws IOException + */ + protected String serviceRequest(String ... 
paramValues) throws IOException { + HttpUriRequest req; + responseContentType = null; + try { + try { + req = requestFactory.createRequest(paramValues); + } catch (final Exception e) { + throw new RuntimeException(e); + } + req.setHeader("Accept", accept==null?"*":accept); + final HttpResponse resp = client.execute(req); + String page=""; + final HttpEntity entity = resp.getEntity(); + if (entity != null ) { + String encoding = "utf-8"; + assertNotNull("Entity in " + resp.getStatusLine().getStatusCode()+" response must specify content type",entity.getContentType()); + final Matcher m = charset.matcher(entity.getContentType().getValue()); + if (m.find()) { + encoding = m.group(1); + } + page = QueryServlet.readFully(new InputStreamReader(entity.getContent(),encoding)); + responseContentType = entity.getContentType().getValue(); + } + if ( resp.getStatusLine().getStatusCode()>=(permit400s?500:400) ) { + fail(resp.getStatusLine().toString()+"\n"+ page); + } + return page; + } + finally { + resetDefaultOptions(); + } + } + + private Map<String,String[]> pairs2map(String... paramValues) { + final Map<String,String[]> params = new HashMap<String,String[]>(); + for (int i=0;i<paramValues.length;i+=2) { + final String key = paramValues[i]; + final String value = paramValues[i+1]; + final String[] val = params.get(key); + if (val==null) { + params.put(key, new String[]{value}); + } else { + // horridly inefficient, never called? + final String nval[] = new String[val.length+1]; + System.arraycopy(val, 0, nval, 0, val.length); + nval[val.length] = value; + params.put(key, nval); + } + } + return params; + } + + /** + * The method is a POST usng url-encoded form data, with the parameters being those past + to {@link #serviceRequest(String...)} call. + */ + protected void setMethodisPostUrlEncodedData() { + requestFactory = new RequestFactory(){ + @Override + public HttpUriRequest createRequest(String... params) { + final HttpPost rslt = new HttpPost(m_serviceURL+"/sparql"); + try { + rslt.setEntity(ConnectOptions.getFormEntity(pairs2map(params))); + } catch (final Exception e) { + throw new RuntimeException(e); + } + return rslt; + } + }; + } + + /** + * The method is a POST of the given document + * @param mimeType The mimetype of the document + * @param body The string of the document body + */ + protected void setMethodisPost(String mimeType, String body) { + StringEntity toPostx = null; + try { + toPostx = new StringEntity(body, mimeType,"utf-8"); + } catch (final UnsupportedEncodingException e) { + throw new Error(e); + } + final HttpEntity toPost = toPostx; + requestFactory = new RequestFactory(){ + + @Override + public HttpUriRequest createRequest(String... params) { + final StringBuffer url = new StringBuffer(m_serviceURL); + url.append("/sparql"); + char sep = '?'; + for (int i=0;i<params.length;i+=2) { + url.append(sep); + url.append(params[i]); + url.append('='); + try { + url.append(URLEncoder.encode(params[i+1], "UTF-8")); + } catch (final UnsupportedEncodingException e) { + // JVM must support UTF-8 + throw new Error(e); + } + sep='&'; + } + final HttpPost rslt = new HttpPost(url.toString()); + rslt.setEntity(toPost); + return rslt; + } + }; + } + + /** + * Normally a 400 or 404 response fails the test, calling this method allows such responses. + */ + protected void setAllow400s() { + this.permit400s = true; + } + + /** + * Assert that the update from {@link #update} has or has not taken place. 
+ * This calls {@link #resetDefaultOptions()}, and the next call to {@link #serviceRequest(String...)} + * will need to be setup after this call. + * @param expected The expected result + * @throws IOException + */ + protected void checkUpdate(boolean expected) throws IOException { + resetDefaultOptions(); + assertTrue(serviceRequest("query",askIfUpdated).contains(Boolean.toString(expected))); + } + + /** + * The next request is a GET, (this is the default) + */ + protected void setMethodAsGet() { + requestFactory = GET; + } + + public AbstractProtocolTest(HttpServlet servlet, String name) { + super(name); + this.servlet = servlet; + } + + public AbstractProtocolTest(String name) { + this(new QueryServlet(), name); + } +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ExampleProtocolTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ExampleProtocolTest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ExampleProtocolTest.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -0,0 +1,54 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.rdf.sail.webapp; + +import java.io.IOException; + +import junit.framework.Test; + + +/** + * This class tests whether its superclass is working (at least a little) + * The superclass provides capability to check the request/response protocol, + * without actually starting a server. + * @author jeremycarroll + * + */ +public class ExampleProtocolTest extends AbstractProtocolTest { + + public ExampleProtocolTest(String name) { + super(name); + } + + public void test101() throws IOException { + assertTrue(serviceRequest("query","SELECT ( true AS ?t ) {}").contains("</sparql>")); + assertEquals("application/sparql-results+xml", getResponseContentType()); + } + + + static public Test suite() { + return ProxySuiteHelper.suiteWhenStandalone(ExampleProtocolTest.class,"test.*", TestMode.quads,TestMode.sids,TestMode.triples); + } + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -0,0 +1,203 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... 
+ +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +package com.bigdata.rdf.sail.webapp; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.Enumeration; +import java.util.regex.Pattern; + +import junit.extensions.proxy.ProxyTestSuite; +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +/** + * This class provides static methods to help creating + * test classes and suites of tests that use the proxy test + * approach. For creating test classes use {@link #suiteWhenStandalone(Class, String, TestMode...)}, + * when creating test suites use {@link #suiteWithOptionalProxy(String, TestMode...)} + * <p> + * The intent is to enable the developer in eclipse to run JUnit tests + * from a test file or a test suite file, while still allowing that same file + * to be included unchanged in the main test suite. The methods defined here + * hence provide a default behavior in the case that the {@link TestNanoSparqlServerWithProxyIndexManager} + * has already loaded before this class. + * @author jeremycarroll + * + */ +public class ProxySuiteHelper { + + private static class CloningTestSuite extends ProxyTestSuite { + + public CloningTestSuite(Test delegate, String name) { + super(delegate, name); + } + + @Override + public void addTest(Test test) { + super.addTest(cloneTest(getDelegate(),test)); + } + } + + private static class MultiModeTestSuite extends TestSuite { + private final ProxyTestSuite subs[]; + + public MultiModeTestSuite(String name, TestMode ...modes ) { + super(name); + subs = new ProxyTestSuite[modes.length]; + int i = 0; + for (final TestMode mode: modes) { + final ProxyTestSuite suite2 = TestNanoSparqlServerWithProxyIndexManager.createProxyTestSuite(TestNanoSparqlServerWithProxyIndexManager.getTemporaryJournal(),mode); + super.addTest(suite2); + suite2.setName(mode.name()); + subs[i++] = suite2; + } + } + + @SuppressWarnings("rawtypes") + @Override + public void addTestSuite(Class clazz) { + for (final ProxyTestSuite s:subs) { + s.addTestSuite(clazz); + } + } + + @Override + public void addTest(Test test) { + for (final ProxyTestSuite s:subs) { + s.addTest(cloneTest(s.getDelegate(),test)); + } + } + } + + private static Test cloneTest(Test delegate, Test test) { + if (test instanceof TestSuite) { + return cloneSuite(delegate, (TestSuite)test); + } + if (test instanceof TestCase) { + return cloneTestCase((TestCase)test); + } + throw new IllegalArgumentException("Cannot handle test of type: "+test.getClass().getName()); + } + + + private static Test cloneTestCase(TestCase test) { + return createTest(test.getClass(),test.getName()); + } + + private static Test cloneSuite(Test delegate, TestSuite suite) { + final TestSuite rslt = new CloningTestSuite(delegate,suite.getName()); + @SuppressWarnings("unchecked") + final + Enumeration<Test> enumerate = 
suite.tests(); + while( enumerate.hasMoreElements() ) { + rslt.addTest(enumerate.nextElement()); + } + return rslt; + } + + /** + * This variable tells us if the class {@link TestNanoSparqlServerWithProxyIndexManager} + * (or potentially a similar class that sets this variable) + * has loaded. This information is used by {@link #suiteWhenStandalone(Class, String, TestMode...)} + * to decide whether to operate in stand-alone fashion, or to default to participating + * in a larger test suite, managed by the proxy. + */ + static boolean proxyIndexManagerTestingHasStarted = false; + + /** + * Call this method to create local testing using one or more proxies. + * e.g. right clicking in eclipse and running JUnit tests works. + * Also using this within a TestSuite also works. + * + * + * @param clazz The clazz to be tested, i.e. the calling class + * @param regex Matched against the test names to decide which tests to run. Should usually start in "test.*" + * @param modes One or more TestModes. + * @return + */ + public static Test suiteWhenStandalone(Class<? extends TestCase> clazz, String regex, TestMode ... modes) { + if (!proxyIndexManagerTestingHasStarted) { + final Pattern pat = Pattern.compile(regex); + proxyIndexManagerTestingHasStarted = true; + final TestSuite suite = new MultiModeTestSuite(clazz.getName(),modes); + addMatchingTestsFromClass(suite, clazz, pat); + return suite; + } else { + return new TestSuite(clazz); + } + } + + /** + * Call this method to create a new test suite which can include + * other test suites and tests using proxies. + * Having created the test suite then the classes and tests and suites + * are added in the usual way. + * @param modes One or more TestModes. + * @return + */ + public static TestSuite suiteWithOptionalProxy(String name, TestMode ... mode) { + if (!proxyIndexManagerTestingHasStarted) { + proxyIndexManagerTestingHasStarted = true; + return new MultiModeTestSuite(name,mode); + } else { + return new TestSuite(name); + } + } + + private static void addMatchingTestsFromClass(TestSuite suite3, Class<? extends TestCase> clazz, Pattern pat) { + for (final Method m:clazz.getMethods()) { + if ( m.getParameterTypes().length==0 && pat.matcher(m.getName()).matches() ) { + suite3.addTest(createTest(clazz,m.getName())); + } + } + } + + private static Test createTest(Class<? extends TestCase> clazz, String name) { + try { + @SuppressWarnings("unchecked") + final + Constructor<? 
extends TestCase> cons = TestSuite.getTestConstructor(clazz); + if (cons.getParameterTypes().length == 1) { + return cons.newInstance(name); + } else { + final TestCase test = cons.newInstance(); + test.setName(name); + return test; + } + } catch (final NoSuchMethodException e) { + throw new RuntimeException("Failed to find constructor"); + } catch (final InstantiationException e) { + throw new RuntimeException(e); + } catch (final IllegalAccessException e) { + throw new RuntimeException(e); + } catch (final IllegalArgumentException e) { + throw new RuntimeException(e); + } catch (final InvocationTargetException e) { + throw new RuntimeException(e); + } + } +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -57,10 +57,10 @@ final TestSuite suite = new TestSuite("WebApp"); -// suite.addTest(TestNanoSparqlServerWithProxyIndexManager2.suite(TestMode.triples)); -// -// suite.addTest(TestNanoSparqlServerWithProxyIndexManager2.suite(TestMode.sids)); + suite.addTest(TestNanoSparqlServerWithProxyIndexManager2.suite(TestMode.triples)); + suite.addTest(TestNanoSparqlServerWithProxyIndexManager2.suite(TestMode.sids)); + suite.addTest(TestNanoSparqlServerWithProxyIndexManager2.suite(TestMode.quads)); return suite; Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAskJsonTrac704.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAskJsonTrac704.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAskJsonTrac704.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -0,0 +1,60 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +package com.bigdata.rdf.sail.webapp; + +import java.io.IOException; + +import junit.framework.Test; + + +public class TestAskJsonTrac704 extends AbstractProtocolTest { + + public TestAskJsonTrac704(String name) { + super(name); + } + + + static public Test suite() { + return ProxySuiteHelper.suiteWhenStandalone(TestAskJsonTrac704.class,"test.*",TestMode.quads,TestMode.sids,TestMode.triples); + } + + /** + * This does not work - trac 704 + * @throws IOException + */ + public void testAskGetJSON() throws IOException { + this.setAccept(BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON); + final String response = serviceRequest("query",AbstractProtocolTest.ASK); + assertTrue("Bad response: "+response,response.contains("\"boolean\": ")); + assertEquals(BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON, getResponseContentType()); + } + + /** + * This does not work - trac 704 + * @throws IOException + */ + public void testAskPostEncodeJSON() throws IOException { + setMethodisPostUrlEncodedData(); + testAskGetJSON() ; + } +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -164,5 +164,12 @@ } } + + public void test_conneg_ask_json() { + final ConnegUtil util = new ConnegUtil(BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON); + final BooleanQueryResultFormat format = util + .getBooleanQueryResultFormat(BooleanQueryResultFormat.SPARQL); + assertFalse(format.toString(),format.toString().toLowerCase().contains("xml")); + } } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -1,3 +1,26 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + package com.bigdata.rdf.sail.webapp; import java.io.ByteArrayOutputStream; @@ -5,6 +28,8 @@ import java.io.IOException; import java.util.Collection; +import junit.framework.Test; + import org.openrdf.model.Graph; import org.openrdf.model.Literal; import org.openrdf.model.Resource; @@ -51,6 +76,9 @@ } + public static Test suite() { + return ProxySuiteHelper.suiteWhenStandalone(TestNanoSparqlClient.class, "test.*DELETE.*", TestMode.quads,TestMode.sids,TestMode.triples); + } public void test_startup() throws Exception { assertTrue("open", m_fixture.isRunning()); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -1,5 +1,31 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + package com.bigdata.rdf.sail.webapp; +import junit.framework.Test; +import junit.framework.TestSuite; + import com.bigdata.journal.IIndexManager; import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager; @@ -20,6 +46,10 @@ super(name); } + + static public Test suite() { + return ProxySuiteHelper.suiteWhenStandalone(TestNanoSparqlClient2.class,"test.*", TestMode.quads,TestMode.sids,TestMode.triples); + } /** * Delete everything matching an access path description. 
Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -34,6 +34,7 @@ import junit.framework.TestCase; import junit.framework.TestListener; import junit.framework.TestResult; +import junit.framework.TestSuite; import junit.textui.ResultPrinter; import com.bigdata.journal.BufferMode; @@ -79,6 +80,10 @@ */ public class TestNanoSparqlServerWithProxyIndexManager<S extends IIndexManager> extends AbstractIndexManagerTestCase<S> { + + static { + ProxySuiteHelper.proxyIndexManagerTestingHasStarted = true; + } /** * The {@link IIndexManager} for the backing persistence engine (may be a @@ -109,7 +114,7 @@ } - static private Journal getTemporaryJournal() { + static Journal getTemporaryJournal() { final Properties properties = new Properties(); @@ -196,20 +201,12 @@ * Return suite running in the given mode against the given * {@link IIndexManager}. */ - public static Test suite(final IIndexManager indexManager, + public static TestSuite suite(final IIndexManager indexManager, final TestMode testMode) { - final TestNanoSparqlServerWithProxyIndexManager<?> delegate = new TestNanoSparqlServerWithProxyIndexManager( - null/* name */, indexManager, testMode); // !!!! THIS CLASS !!!! + final ProxyTestSuite suite = createProxyTestSuite(indexManager,testMode); /* - * Use a proxy test suite and specify the delegate. - */ - - final ProxyTestSuite suite = new ProxyTestSuite(delegate, - "NanoSparqlServer Proxied Test Suite"); - - /* * List any non-proxied tests (typically bootstrapping tests). */ @@ -219,6 +216,9 @@ /* * Proxied test suites. */ + + //Protocol + suite.addTest(TestProtocolAll.suite()); // Multi-tenancy API. suite.addTestSuite(TestMultiTenancyAPI.class); @@ -251,6 +251,19 @@ } + static ProxyTestSuite createProxyTestSuite(final IIndexManager indexManager, final TestMode testMode) { + final TestNanoSparqlServerWithProxyIndexManager<?> delegate = new TestNanoSparqlServerWithProxyIndexManager( + null/* name */, indexManager, testMode); // !!!! THIS CLASS !!!! + + /* + * Use a proxy test suite and specify the delegate. 
+ */ + + final ProxyTestSuite suite = new ProxyTestSuite(delegate, + "NanoSparqlServer Proxied Test Suite"); + return suite; + } + @SuppressWarnings("unchecked") public S getIndexManager() { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java 2013-08-09 17:36:02 UTC (rev 7280) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -79,6 +79,10 @@ */ public class TestNanoSparqlServerWithProxyIndexManager2<S extends IIndexManager> extends AbstractIndexManagerTestCase<S> { + + static { + ProxySuiteHelper.proxyIndexManagerTestingHasStarted = true; + } /** * The {@link IIndexManager} for the backing persistence engine (may be a @@ -214,6 +218,8 @@ */ suite.addTestSuite(TestNanoSparqlClient2.class); + //Protocol + suite.addTest(TestProtocolAll.suite()); return suite; Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestPostNotURLEncoded.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestPostNotURLEncoded.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestPostNotURLEncoded.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -0,0 +1,83 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.rdf.sail.webapp; + +import java.io.IOException; + +import junit.framework.Test; + +/** + * See trac 711 for discussion. 
+ * + * @author jeremycarroll + * + */ + +public class TestPostNotURLEncoded extends AbstractProtocolTest { + + + static public Test suite() { + return ProxySuiteHelper.suiteWhenStandalone(TestPostNotURLEncoded.class,"test.*", TestMode.quads,TestMode.sids,TestMode.triples); + } + public TestPostNotURLEncoded(String name) { + super(name); + } + + public void testSelectPostXML() throws IOException { + setMethodisPost("application/sparql-query",AbstractProtocolTest.SELECT); + assertTrue(serviceRequest().contains("</sparql>")); + assertEquals(BigdataRDFServlet.MIME_SPARQL_RESULTS_XML, getResponseContentType()); + } + + + public void testSelectPostJSON() throws IOException { + setAccept(BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON); + setMethodisPost("application/sparql-query",AbstractProtocolTest.SELECT); + assertTrue(serviceRequest().contains("\"results\": {")); + assertEquals(BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON, getResponseContentType()); + } + + public void testAskPostXML() throws IOException { + setMethodisPost("application/sparql-query",AbstractProtocolTest.ASK); + assertTrue(serviceRequest().contains("</sparql>")); + assertEquals(BigdataRDFServlet.MIME_SPARQL_RESULTS_XML, getResponseContentType()); + } + + + public void testAskPostJSON() throws IOException { + setAccept(BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON); + setMethodisPost("application/sparql-query",AbstractProtocolTest.ASK); + String response = serviceRequest("query",AbstractProtocolTest.ASK); + assertTrue("Bad response: "+response,response.contains("\"boolean\": ")); + assertEquals(BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON, getResponseContentType()); + } + + + public void testUpdatePost() throws IOException { + checkUpdate(false); + setMethodisPost("application/sparql-update",update); + serviceRequest(); + checkUpdate(true); + } +} Copied: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestProtocolAll.java (from rev 7269, branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java) =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestProtocolAll.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestProtocolAll.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -0,0 +1,40 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sail.webapp; + + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +public class TestProtocolAll extends TestCase { + public static Test suite() { + final TestSuite suite = ProxySuiteHelper.suiteWithOptionalProxy("SPARQL 1.1 Protocol",TestMode.quads,TestMode.triples, TestMode.sids); + suite.addTestSuite(ExampleProtocolTest.class); + suite.addTestSuite(TestRelease123Protocol.class); + suite.addTestSuite(TestPostNotURLEncoded.class); + suite.addTestSuite(TestAskJsonTrac704.class); + return suite; + } + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestRelease123Protocol.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestRelease123Protocol.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestRelease123Protocol.java 2013-08-09 23:15:33 UTC (rev 7281) @@ -0,0 +1,93 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.rdf.sail.webapp;
+
+import java.io.IOException;
+
+import junit.framework.Test;
+
+
+/**
+ * This test class exercises protocol issues (mimetypes, parameters etc)
+ * as at release 1.2.3; prior to addressing protocol related trac items such
+ * as 704, 697, 711
+ * @author jeremycarroll
+ *
+ */
+public class TestRelease123Protocol extends AbstractProtocolTest{
+
+	static public Test suite() {
+		return ProxySuiteHelper.suiteWhenStandalone(TestRelease123Protocol.class,"test.*", TestMode.quads,TestMode.sids,TestMode.triples);
+	}
+	public TestRelease123Protocol(String name) {
+		super(name);
+	}
+
+	public void testSelectGetXML() throws IOException {
+		assertTrue(serviceRequest("query",SELECT).contains("</sparql>"));
+		assertEquals(BigdataRDFServlet.MIME_SPARQL_RESULTS_XML, getResponseContentType());
+	}
+
+	public void testSelectGetJSON() throws IOException {
+		this.setAccept(BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON);
+		assertTrue(serviceRequest("query",SELECT).contains("\"results\": {"));
+		assertEquals(BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON, getResponseContentType());
+
+	}
+	public void testAskGetXML() throws IOException {
+		assertTrue(serviceRequest("query",ASK).contains("</sparql>"));
+		assertEquals(BigdataRDFServlet.MIME_SPARQL_RESULTS_XML, getResponseContentType());
+	}
+
+	public void testSelectPostEncodeXML() throws IOException {
+		setMethodisPostUrlEncodedData();
+		testSelectGetXML();
+	}
+
+	public void testSelectPostEncodeJSON() throws IOException {
+		setMethodisPostUrlEncodedData();
+		testSelectGetJSON();
+	}
+	public void testAskPostEncodeXML() throws IOException {
+		setMethodisPostUrlEncodedData();
+		testAskGetXML();
+	}
+
+
+	public void testUpdateGet() throws IOException {
+		// This should not cause an update - in release 1.2.3 it returns a service description
+		// which seems a little strange but is not wrong; this test will also allow a 4XX response.
+		checkUpdate(false);
+		setAllow400s();
+		serviceRequest("update",update);
+		checkUpdate(false);
+	}
+	public void testUpdatePostEncode() throws IOException {
+		checkUpdate(false);
+		setMethodisPostUrlEncodedData();
+		serviceRequest("update",update);
+		checkUpdate(true);
+	}
+
+
+}
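Every test class added in r7281 wires into the new helper the same way: a static suite() method that delegates to ProxySuiteHelper. A minimal sketch of that hook follows; the class name and test body here are invented for illustration, and only the suite() idiom itself comes from the commit.

{{{
package com.bigdata.rdf.sail.webapp;

import java.io.IOException;

import junit.framework.Test;

// Illustrative sketch of the r7281 hook. Run standalone (e.g., from an IDE),
// suiteWhenStandalone() builds a MultiModeTestSuite that repeats each matching
// test under every listed TestMode; when the larger proxied CI suite has
// already started, the same call degrades to a plain TestSuite managed by the
// proxy.
public class ExampleHookTest extends AbstractProtocolTest {

	public ExampleHookTest(final String name) {
		super(name);
	}

	static public Test suite() {
		// "test.*" selects every zero-argument method whose name matches.
		return ProxySuiteHelper.suiteWhenStandalone(ExampleHookTest.class,
				"test.*", TestMode.quads, TestMode.sids, TestMode.triples);
	}

	public void testSelectGetXML() throws IOException {
		assertTrue(serviceRequest("query", SELECT).contains("</sparql>"));
	}
}
}}}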
From: <tho...@us...> - 2013-08-09 17:36:10
Revision: 7280
http://bigdata.svn.sourceforge.net/bigdata/?rev=7280&view=rev
Author: thompsonbry
Date: 2013-08-09 17:36:02 +0000 (Fri, 09 Aug 2013)

Log Message:
-----------
It turns out that the variable {{{now}}} in the GatherTask was only used for the error response.

The method

{{{
protected IHANotifyReleaseTimeRequest newHANotifyReleaseTimeRequest(final UUID serviceId)
}}}

was actually taking its own timestamp using the following method, which is designed to allow override from the HA test suite.

{{{
private long newConsensusProtocolTimestamp()
}}}

Since the clock skew issue arose only in the error path, I have modified the error path to use nextTimestamp() and thus report a reasonable timestamp, avoiding this false clock skew error.

I have also modified one of the HA tests to use the ABC() utility to start the services; it was replicating that logic inside the test code.

See #720

Modified Paths:
--------------
branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java
branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java	2013-08-09 17:04:20 UTC (rev 7279)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java	2013-08-09 17:36:02 UTC (rev 7280)
@@ -1463,11 +1463,12 @@
             log.info("Running gather on follower");

             /*
-             * These variables are set in the try {} below. If we can
-             * discover the leader, then we will eventually respond either
-             * in the try{} or in the finally{}.
+             * This variable is set in the try {} below. We eventually
+             * respond either in the try{} or in the finally{}, depending on
+             * whether or not the GatherTask encounters an error when it
+             * executes.
              */
-            long now = 0L;
+//            long now = 0L;

             boolean didNotifyLeader = false;

@@ -1497,18 +1498,10 @@
                 final QuorumService<HAGlue> quorumService = getQuorum()
                         .getClient();

-                /*
-                 * This timestamp is used to help detect clock skew.
-                 */
-                now = newConsensusProtocolTimestamp();
-
-                // The leader is obtained by its serviceId above.
-//                /*
-//                 * If the token is invalid, making it impossible for us to
-//                 * discover and message the leader, then the leader will
-//                 * reset() the CyclicBarrier.
-//                 */
-//                leader = quorumService.getLeader(token);
+//                /*
+//                 * This timestamp is used to help detect clock skew.
+//                 */
+//                now = newConsensusProtocolTimestamp();

                 /*
                  * Note: At this point we have everything we need to form up
@@ -1621,7 +1614,8 @@
                     try {
                         final IHANotifyReleaseTimeRequest resp = new HANotifyReleaseTimeRequest(
                                 serviceId, 0L/* pinnedCommitTime */,
-                                1L/* pinnedCommitCounter */, now/* timestamp */);
+                                1L/* pinnedCommitCounter */,
+                                nextTimestamp()/* timestamp */);

                         log.warn("Sending mock response for gather protocol: cause="
                                 + t);

                         // Will block until barrier breaks on leader.
Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java
===================================================================
--- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java	2013-08-09 17:04:20 UTC (rev 7279)
+++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java	2013-08-09 17:36:02 UTC (rev 7280)
@@ -1104,18 +1104,19 @@
      */
     public void testStartABC_Rebuild() throws Exception {

-        {
-
-            final HAGlue serverA = startA();
-            final HAGlue serverB = startB();
-            final HAGlue serverC = startC();
-
-            awaitFullyMetQuorum();
-
-            // Await initial commit point (KB create) on all servers.
-            awaitCommitCounter(1L, serverA, serverB, serverC);
-
-        }
+        new ABC(false/*sequential*/); // simultaneous start.
+//        {
+//
+//            final HAGlue serverA = startA();
+//            final HAGlue serverB = startB();
+//            final HAGlue serverC = startC();
+//
+//            awaitFullyMetQuorum();
+//
+//            // Await initial commit point (KB create) on all servers.
+//            awaitCommitCounter(1L, serverA, serverB, serverC);
+//
+//        }

         // Now run several transactions
         for (int i = 0; i < 5; i++)
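The failure mode fixed in r7280 is worth spelling out: the leader compares the timestamp in each follower's IHANotifyReleaseTimeRequest against its own clock, so a timestamp captured when the GatherTask started but only transmitted later on the error path is already stale when the leader inspects it, and perfectly synchronized clocks can still trip the skew check. A hedged sketch of that effect follows; the class, method name, and threshold are invented, and only the failure mode itself is taken from the commit.

{{{
// Illustrative sketch, not code from the commit: on the leader, a
// stale-but-correct timestamp is indistinguishable from real clock skew.
// Taking nextTimestamp() at the moment the mock response is built keeps the
// reported time current and avoids the false positive.
public class SkewCheckSketch {

	static void assertNoSkew(final long leaderNow,
			final long reportedTimestamp, final long maxSkewMillis) {
		final long delta = Math.abs(leaderNow - reportedTimestamp);
		if (delta > maxSkewMillis)
			throw new RuntimeException("Clock skew suspected: delta=" + delta
					+ "ms");
	}

	public static void main(final String[] args) {
		final long taskStart = System.currentTimeMillis();
		// Imagine the GatherTask fails and only responds two seconds later:
		final long leaderNow = taskStart + 2000;
		assertNoSkew(leaderNow, taskStart, 500); // trips, yet the clocks agree
	}
}
}}}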
From: <tho...@us...> - 2013-08-09 17:04:27
Revision: 7279
http://bigdata.svn.sourceforge.net/bigdata/?rev=7279&view=rev
Author: thompsonbry
Date: 2013-08-09 17:04:20 +0000 (Fri, 09 Aug 2013)

Log Message:
-----------
Commented out a superclass constructor that is not available in JDK 1.6.

Modified Paths:
--------------
branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/StackInfoReport.java

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/StackInfoReport.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/StackInfoReport.java	2013-08-09 16:17:58 UTC (rev 7278)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/StackInfoReport.java	2013-08-09 17:04:20 UTC (rev 7279)
@@ -47,10 +47,11 @@
 		super();
 	}

-	public StackInfoReport(String message, Throwable cause,
-			boolean enableSuppression, boolean writableStackTrace) {
-		super(message, cause, enableSuppression, writableStackTrace);
-	}
+	// Note: This constructor is not available in JDK 1.6.
+//	public StackInfoReport(String message, Throwable cause,
+//			boolean enableSuppression, boolean writableStackTrace) {
+//		super(message, cause, enableSuppression, writableStackTrace);
+//	}

 	public StackInfoReport(String message, Throwable cause) {
 		super(message, cause);
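For context, the commented-out constructor delegates to the four-argument Throwable constructor, which was only introduced in JDK 1.7. A sketch of the version issue follows; the subclass name is invented, and the two-argument form is the one the commit retains.

{{{
// The four-argument Throwable constructor exists only in JDK 1.7 and later;
// it does not compile against a JDK 1.6 class library:
//
//     protected Throwable(String message, Throwable cause,
//             boolean enableSuppression, boolean writableStackTrace)
//
// The two-argument form is the closest JDK 1.6 substitute: suppression stays
// enabled and the stack trace stays writable, since 1.6 offers no way to
// turn either off.
public class VersionSafeReport extends RuntimeException {

	private static final long serialVersionUID = 1L;

	public VersionSafeReport(final String message, final Throwable cause) {
		super(message, cause); // compiles against JDK 1.6 and later
	}
}
}}}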
From: <tho...@us...> - 2013-08-09 16:18:05
Revision: 7278
http://bigdata.svn.sourceforge.net/bigdata/?rev=7278&view=rev
Author: thompsonbry
Date: 2013-08-09 16:17:58 +0000 (Fri, 09 Aug 2013)

Log Message:
-----------
Added error checking for nulls where they are not allowed in the RMI messages.

Modified Paths:
--------------
branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java
branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java	2013-08-09 16:15:49 UTC (rev 7277)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java	2013-08-09 16:17:58 UTC (rev 7278)
@@ -39,6 +39,8 @@
     public HAGatherReleaseTimeRequest(final long token,
             final long timestampOnLeader, final UUID leaderId) {
+        if (leaderId == null)
+            throw new IllegalArgumentException();
         this.token = token;
         this.timestampOnLeader = timestampOnLeader;
         this.leaderId = leaderId;

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java	2013-08-09 16:15:49 UTC (rev 7277)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HANotifyReleaseTimeRequest.java	2013-08-09 16:17:58 UTC (rev 7278)
@@ -40,6 +40,8 @@
     public HANotifyReleaseTimeRequest(final UUID serviceUUID,
             final long pinnedCommitTime, final long pinnedCommitCounter,
             final long timestamp) {
+        if (serviceUUID == null)
+            throw new IllegalArgumentException();
         this.serviceUUID = serviceUUID;
         this.pinnedCommitTime = pinnedCommitTime;
         this.pinnedCommitCounter = pinnedCommitCounter;
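The practical effect of these precondition checks, as a hedged usage sketch: the test scaffolding below is invented, but the constructor signature and the IllegalArgumentException come directly from the diff above.

{{{
import com.bigdata.ha.msg.HANotifyReleaseTimeRequest;

// Illustrative only: after r7278 a null serviceUUID fails fast at
// construction time rather than surfacing later as an opaque failure
// during RMI marshalling or on the remote side.
public class NullCheckExample {
	public static void main(final String[] args) {
		try {
			new HANotifyReleaseTimeRequest(null /* serviceUUID */,
					0L /* pinnedCommitTime */, 1L /* pinnedCommitCounter */,
					System.currentTimeMillis() /* timestamp */);
			throw new AssertionError("expected IllegalArgumentException");
		} catch (final IllegalArgumentException expected) {
			// The bad argument is rejected at the call site.
		}
	}
}
}}}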
From: <tho...@us...> - 2013-08-09 16:15:56
Revision: 7277 http://bigdata.svn.sourceforge.net/bigdata/?rev=7277&view=rev Author: thompsonbry Date: 2013-08-09 16:15:49 +0000 (Fri, 09 Aug 2013) Log Message: ----------- The serviceId of the follower needs to be defined in order to make the RMI back to the leader since the leader requires the serviceId in order to know which services have responded. The serviceId is now passed into the factory method that returns the GatherTask. It was being set only after the error was detected and hence the RMI back to the leader was failing. @see #720 Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-09 16:01:16 UTC (rev 7276) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-09 16:15:49 UTC (rev 7277) @@ -7708,11 +7708,16 @@ if (leader == null) throw new RuntimeException( "Could not discover the quorum leader."); + + final UUID serviceId = getServiceId(); + + if(serviceId == null) + throw new AssertionError(); final Callable<IHANotifyReleaseTimeResponse> task = ((AbstractHATransactionService) AbstractJournal.this - .getLocalTransactionManager() - .getTransactionService()) - .newGatherMinimumVisibleCommitTimeTask(leader, req); + .getLocalTransactionManager().getTransactionService()) + .newGatherMinimumVisibleCommitTimeTask(leader, + serviceId, req); final FutureTask<IHANotifyReleaseTimeResponse> ft = new FutureTask<IHANotifyReleaseTimeResponse>(task); Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-09 16:01:16 UTC (rev 7276) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java 2013-08-09 16:15:49 UTC (rev 7277) @@ -1376,9 +1376,10 @@ @Override public Callable<IHANotifyReleaseTimeResponse> newGatherMinimumVisibleCommitTimeTask( - final HAGlue leader, final IHAGatherReleaseTimeRequest req) { + final HAGlue leader, final UUID serviceId, + final IHAGatherReleaseTimeRequest req) { - return new GatherTask(leader, req); + return new GatherTask(leader, serviceId, req); } @@ -1415,19 +1416,36 @@ */ private class GatherTask implements Callable<IHANotifyReleaseTimeResponse> { + /** + * The proxy for the leader (the service that made this request). + * This is used to RMI back to the leader and therefore MUST be non- + * <code>null</code>. + */ private final HAGlue leader; + /** + * The {@link UUID} of <em>this</em> service. This is required as + * part of the RMI back to the leader (so the leader knows which + * services responded) and therefore MUST be non-<code>null</code>. 
+	 */
+	private final UUID serviceId;

 	private final IHAGatherReleaseTimeRequest req;

-	public GatherTask(final HAGlue leader, final IHAGatherReleaseTimeRequest req) {
+	public GatherTask(final HAGlue leader, final UUID serviceId,
+			final IHAGatherReleaseTimeRequest req) {

 		if (leader == null)
 			throw new IllegalArgumentException();

+		if (serviceId == null)
+			throw new IllegalArgumentException();
+
 		if (req == null)
 			throw new IllegalArgumentException();

 		this.leader = leader;

+		this.serviceId = serviceId;
+
 		this.req = req;

 	}

@@ -1450,7 +1468,6 @@
              * in the try{} or in the finally{}.
              */
             long now = 0L;
-            UUID serviceId = null;

             boolean didNotifyLeader = false;

@@ -1480,9 +1497,6 @@
                 final QuorumService<HAGlue> quorumService = getQuorum()
                         .getClient();

-                // The serviceId for this service.
-                serviceId = quorumService.getServiceId();
-
                 /*
                  * This timestamp is used to help detect clock skew.
                  */

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java	2013-08-09 16:01:16 UTC (rev 7276)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java	2013-08-09 16:15:49 UTC (rev 7277)
@@ -58,6 +58,14 @@
     /**
      * Factory for the Gather task that will be executed by the follower.
      *
+     * @param leader
+     *            The proxy for the quorum leader (the service that made this
+     *            request). This is used to RMI back to the leader and therefore
+     *            MUST be non- <code>null</code>.
+     * @param serviceId
+     *            The {@link UUID} of this service. This is required as part of
+     *            the RMI back to the leader (so the leader knows which services
+     *            responded) and therefore MUST be non-<code>null</code>.
      * @param req
      *            The request.
      *
@@ -67,7 +75,8 @@
      *             Native thread leak in HAJournalServer process </a>
      */
     abstract public Callable<IHANotifyReleaseTimeResponse> newGatherMinimumVisibleCommitTimeTask(
-            final HAGlue leader, final IHAGatherReleaseTimeRequest req);
+            final HAGlue leader, final UUID serviceId,
+            final IHAGatherReleaseTimeRequest req);

     /**
      * Coordinate the update of the <i>releaseTime</i> on each service that is
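The calling convention after r7277, condensed into a sketch paraphrased from the AbstractJournal diff above; {{{txService}}} stands in for the cast transaction service obtained there and is not a name from the commit.

{{{
// The follower resolves its own serviceId before creating the GatherTask, so
// the task can always identify itself to the leader in the RMI response --
// including on the error path, where serviceId previously could still be null.
final UUID serviceId = getServiceId();

if (serviceId == null)
    throw new AssertionError();

final Callable<IHANotifyReleaseTimeResponse> task = txService
        .newGatherMinimumVisibleCommitTimeTask(leader, serviceId, req);
}}}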