From: <tho...@us...> - 2013-09-04 20:18:38
Revision: 7389
http://bigdata.svn.sourceforge.net/bigdata/?rev=7389&view=rev
Author: thompsonbry
Date: 2013-09-04 20:18:31 +0000 (Wed, 04 Sep 2013)
Log Message:
-----------
I've added a simple InferenceChangeLogReporter. This class may be used to obtain the actual inferences computed during a transaction.
I modified the change log listener test suite to also test this new listener.
The present implementation uses a LinkedHashSet to store the ISPOs for the inferences. This should be scalable up to millions of inferences. If very large sets of inferences will be drawn, then we could substitute an HTree index for the LinkedHashSet. The existing NativeDistinctFilter already does exactly this and could simply replace the hard-coded use of the LinkedHashSet for improved scaling. In this case, the native memory associated with the HTree needs to be released, e.g., through an ICloseable protocol on the change listener.
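For reference, a minimal usage sketch (hedged: it assumes an open BigdataSail named "sail" and a sail connection "cxn", and mirrors the pattern in the modified test suite below; the iterator is closed in a finally block since it runs on the KB's executor service):

    final InferenceChangeLogReporter reporter = new InferenceChangeLogReporter(
            sail.getDatabase());
    cxn.addChangeLog(reporter); // install before writing on the connection.
    // ... add and/or remove statements on cxn ...
    cxn.commit();
    // Visit the inferences added by the transaction.
    final BigdataStatementIterator itr = reporter.addedIterator();
    try {
        while (itr.hasNext()) {
            System.out.println(itr.next());
        }
    } finally {
        itr.close();
    }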
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InferenceChangeLogReporter.java
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InferenceChangeLogReporter.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InferenceChangeLogReporter.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InferenceChangeLogReporter.java 2013-09-04 20:18:31 UTC (rev 7389)
@@ -0,0 +1,145 @@
+package com.bigdata.rdf.changesets;
+
+import java.util.LinkedHashSet;
+import java.util.Set;
+
+import org.openrdf.model.Statement;
+import org.openrdf.model.Value;
+
+import com.bigdata.bop.rdf.filter.NativeDistinctFilter;
+import com.bigdata.htree.HTree;
+import com.bigdata.rdf.internal.IV;
+import com.bigdata.rdf.spo.ISPO;
+import com.bigdata.rdf.store.AbstractTripleStore;
+import com.bigdata.rdf.store.BigdataStatementIterator;
+import com.bigdata.rdf.store.BigdataStatementIteratorImpl;
+import com.bigdata.striterator.ChunkedWrappedIterator;
+import com.bigdata.striterator.IChunkedOrderedIterator;
+import com.bigdata.striterator.ICloseable;
+
+/**
+ * {@link IChangeLog} implementation that reports inferences as RDF
+ * {@link Statement}s. Install this change listener before writing on the sail
+ * connection. After the commit, use {@link #addedIterator()} (
+ * {@link #removedIterator()}) to visit the inferences that were added to
+ * (removed from) the KB by the transaction. If the transaction is aborted,
+ * simply discard the {@link InferenceChangeLogReporter} object. Always use a
+ * new instance of this object for each transaction.
+ *
+ * TODO The present implementation uses a {@link LinkedHashSet} to store the
+ * ISPOs for the inferences. This should be scalable up to millions of
+ * inferences, and maybe the low 10s of millions. If very large sets of
+ * inferences will be drawn, then we could substitute an {@link HTree} index
+ * for the {@link LinkedHashSet}. The existing {@link NativeDistinctFilter}
+ * class automatically converts from a JVM hash collection to an {@link HTree}
+ * and could be used trivially as a replacement for the {@link LinkedHashSet}
+ * in this class. In this case, the native memory associated with the HTree needs
+ * to be released, e.g., through an {@link ICloseable} protocol on the change
+ * listener.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class InferenceChangeLogReporter implements IChangeLog {
+
+ /**
+ * The KB.
+ */
+ private final AbstractTripleStore kb;
+
+ /** New inferences. */
+ private final Set<ISPO> added = new LinkedHashSet<ISPO>();
+
+ /** Removed inferences. */
+ private final Set<ISPO> removed = new LinkedHashSet<ISPO>();
+
+ /**
+ *
+ * @param kb
+ * The KB (used to resolve {@link IV}s to {@link Value}s).
+ */
+ public InferenceChangeLogReporter(final AbstractTripleStore kb) {
+ this.kb = kb;
+ }
+
+ /**
+ * Clear the internal state. This may be used to reset the listener if
+ * multiple commits are used for the same connection.
+ * <p>
+ * Note: It is faster to get a new {@link InferenceChangeLogReporter} than
+ * to clear the internal maps, but you cannot replace an {@link IChangeLog}
+ * listener once established on a connection.
+ */
+ public void clear() {
+ added.clear();
+ removed.clear();
+ }
+
+ @Override
+ public void changeEvent(IChangeRecord record) {
+ final ISPO spo = record.getStatement();
+ if (!spo.isInferred())
+ return;
+ switch (record.getChangeAction()) {
+ case INSERTED:
+ added.add(spo);
+ break;
+ case REMOVED:
+ removed.add(spo);
+ break;
+ case UPDATED:
+ // ignore. statement already exists.
+ break;
+ default:
+ throw new AssertionError();
+ }
+ }
+
+ @Override
+ public void transactionBegin() {
+ }
+
+ @Override
+ public void transactionPrepare() {
+ }
+
+ @Override
+ public void transactionCommited(long commitTime) {
+
+ }
+
+ @Override
+ public void transactionAborted() {
+
+ }
+
+ /**
+ * Return iterator visiting the inferences that were added to the KB.
+ */
+ public BigdataStatementIterator addedIterator() {
+
+ // Wrap as chunked iterator.
+ final IChunkedOrderedIterator<ISPO> src = new ChunkedWrappedIterator<ISPO>(
+ added.iterator());
+
+ // Asynchronous conversion of ISPOs to Statements.
+ return new BigdataStatementIteratorImpl(kb, src).start(kb
+ .getExecutorService());
+
+ }
+
+ /**
+ * Return iterator visiting the inferences that were removed from the KB.
+ */
+ public BigdataStatementIterator removedIterator() {
+
+ // Wrap as chunked iterator.
+ final IChunkedOrderedIterator<ISPO> src = new ChunkedWrappedIterator<ISPO>(
+ removed.iterator());
+
+ // Asynchronous conversion of ISPOs to Statements.
+ return new BigdataStatementIteratorImpl(kb, src).start(kb
+ .getExecutorService());
+
+ }
+
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java 2013-09-04 19:56:42 UTC (rev 7388)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java 2013-09-04 20:18:31 UTC (rev 7389)
@@ -44,6 +44,7 @@
import com.bigdata.rdf.changesets.IChangeLog;
import com.bigdata.rdf.changesets.IChangeRecord;
import com.bigdata.rdf.changesets.InMemChangeLog;
+import com.bigdata.rdf.changesets.InferenceChangeLogReporter;
import com.bigdata.rdf.model.BigdataStatement;
import com.bigdata.rdf.model.BigdataValueFactory;
import com.bigdata.rdf.spo.ModifiedEnum;
@@ -549,6 +550,10 @@
final InMemChangeLog changeLog = new InMemChangeLog();
cxn.addChangeLog(changeLog);
+
+ final InferenceChangeLogReporter changeLog2 = new InferenceChangeLogReporter(
+ sail.getDatabase());
+ cxn.addChangeLog(changeLog2);
final BigdataValueFactory vf = (BigdataValueFactory) sail.getValueFactory();
@@ -604,6 +609,8 @@
}
compare(expected, changeLog.getLastCommit(sail.getDatabase()));
+ assertSameIteratorAnyOrder(inferred, changeLog2.addedIterator());
+ assertSameIteratorAnyOrder(new BigdataStatement[]{}, changeLog2.removedIterator());
}
for (BigdataStatement stmt : upgrades) {
@@ -656,6 +663,10 @@
final InMemChangeLog changeLog = new InMemChangeLog();
cxn.addChangeLog(changeLog);
+ final InferenceChangeLogReporter changeLog2 = new InferenceChangeLogReporter(
+ sail.getDatabase());
+ cxn.addChangeLog(changeLog2);
+
final BigdataValueFactory vf = (BigdataValueFactory) sail.getValueFactory();
final String ns = BD.NAMESPACE;
@@ -714,9 +725,14 @@
}
compare(expected, changeLog.getLastCommit(sail.getDatabase()));
+ assertSameIteratorAnyOrder(inferredAdd, changeLog2.addedIterator());
+ assertSameIteratorAnyOrder(new BigdataStatement[]{}, changeLog2.removedIterator());
}
+ // reset
+ changeLog2.clear();
+
for (BigdataStatement stmt : explicitRemove) {
cxn.remove(stmt);
}
@@ -735,7 +751,9 @@
}
compare(expected, changeLog.getLastCommit(sail.getDatabase()));
-
+ assertSameIteratorAnyOrder(new BigdataStatement[]{}, changeLog2.addedIterator());
+ assertSameIteratorAnyOrder(inferredRemove, changeLog2.removedIterator());
+
}
if (log.isDebugEnabled()) {
From: <jer...@us...> - 2013-09-04 20:39:21
|
Revision: 7390
http://bigdata.svn.sourceforge.net/bigdata/?rev=7390&view=rev
Author: jeremy_carroll
Date: 2013-09-04 20:39:14 +0000 (Wed, 04 Sep 2013)
Log Message:
-----------
Fix for trac734 - the problem has to do with allowing legal paths when both ends are bound. Tests are included in the suite.
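For illustration (a hypothetical pattern, not the exact query from the ticket): with a zero lower bound, a path such as { :a :p* :a } must still produce the zero-length solution even though the out side of the gearing is already bound. The new canBind() method below accepts that case by testing whether the bound value (constant or variable) is equal to the seed.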
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2013-09-04 20:18:31 UTC (rev 7389)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2013-09-04 20:39:14 UTC (rev 7390)
@@ -360,8 +360,7 @@
*
* :a (:p*)* ?y
*/
- if (lowerBound == 0 && gearing.outVar != null &&
- !childSolutionIn.isBound(gearing.outVar)) {
+ if (lowerBound == 0 && canBind(gearing, childSolutionIn, seed)) {
final IBindingSet bs = parentSolutionIn.clone();
@@ -648,6 +647,20 @@
// return runningSubquery;
} // processChunk method
+
+ /**
+ * Is it possible to bind the "out" of the gearing to the seed? This may
+ * be because it is an unbound variable, or because it is already bound to
+ * the seed (either as a constant or as a variable).
+ */
+ @SuppressWarnings("unchecked")
+ private boolean canBind(final Gearing gearing, IBindingSet childSolutionIn, IConstant<?> seed) {
+ if ( gearing.outVar == null )
+ return seed.equals(gearing.outConst);
+ if ( !childSolutionIn.isBound(gearing.outVar) )
+ return true;
+ return seed.equals(childSolutionIn.get(gearing.outVar));
+ }
/**
* Choose forward or reverse gear based on the semantics of the operator
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2013-09-04 20:18:31 UTC (rev 7389)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2013-09-04 20:39:14 UTC (rev 7390)
@@ -156,6 +156,7 @@
* Tests corresponding to various trouble tickets.
*/
suite.addTestSuite(TestTickets.class);
+ suite.addTestSuite(TestTwoPropertyPaths734.class);
// test suite for inline constraints: GT, LT, GTE, LTE
suite.addTestSuite(TestInlineConstraints.class);
From: <tho...@us...> - 2013-09-10 15:31:13
|
Revision: 7393
http://bigdata.svn.sourceforge.net/bigdata/?rev=7393&view=rev
Author: thompsonbry
Date: 2013-09-10 15:31:04 +0000 (Tue, 10 Sep 2013)
Log Message:
-----------
Refactored the GASRunner into a base class and derived a BigdataGASRunner and a SAILGASRunner from it.
Initial performance with the memory sail does not look very good: about 1/2 of the throughput that we observe with bigdata. I am going to try this on a machine with more RAM to see how much that helps.
See #629 (Graph mining API)
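As a usage sketch (hedged: the BFS analytic class and "kb.properties" are placeholders, and the options shown are the ones parsed by GASRunnerBase), the refactored harness can be driven the same way as the main() entry point shown in SAILGASRunner below:

    // Hypothetical invocation; the analytic and property file are placeholders.
    new BigdataGASRunner<Object, Object, Object>(new String[] {
            "-nthreads", "4", "-nsamples", "100", "-seed", "217",
            "com.bigdata.rdf.graph.analytics.BFS", "kb.properties"
    }).call();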
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASProgram.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASRunner.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/GASRunnerBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASRunner.java
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASRunner.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASProgram.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASProgram.java 2013-09-10 13:55:55 UTC (rev 7392)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASProgram.java 2013-09-10 15:31:04 UTC (rev 7393)
@@ -32,6 +32,7 @@
* the generic type for the per-edge state, but that is not always
* true. The SUM type is scoped to the GATHER + SUM operation (NOT
* the computation).
+ *
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
*/
public interface IGASProgram<VS, ES, ST> extends IGASOptions<VS, ES, ST> {
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASRunner.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASRunner.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASRunner.java 2013-09-10 15:31:04 UTC (rev 7393)
@@ -0,0 +1,181 @@
+package com.bigdata.rdf.graph.impl.sail;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.log4j.Logger;
+import org.openrdf.model.Resource;
+import org.openrdf.sail.Sail;
+import org.openrdf.sail.SailConnection;
+import org.openrdf.sail.SailException;
+import org.openrdf.sail.memory.MemoryStore;
+
+import com.bigdata.rdf.graph.IGASEngine;
+import com.bigdata.rdf.graph.IGraphAccessor;
+import com.bigdata.rdf.graph.impl.sail.SAILGASEngine.SAILGraphAccessor;
+import com.bigdata.rdf.graph.impl.util.GASRunnerBase;
+import com.bigdata.rdf.graph.util.GASUtil;
+
+/**
+ * Class for running GAS performance tests against the SAIL.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class SAILGASRunner<VS, ES, ST> extends GASRunnerBase<VS, ES, ST> {
+
+ private static final Logger log = Logger.getLogger(SAILGASRunner.class);
+
+ public SAILGASRunner(String[] args) throws ClassNotFoundException {
+ super(args);
+ }
+
+ protected class SAILOptionData extends GASRunnerBase<VS, ES, ST>.OptionData {
+
+ private Sail sail = null;
+
+ private SailConnection cxn = null;
+
+ @Override
+ public void init() throws Exception {
+
+ super.init();
+
+ sail = new MemoryStore();
+
+ sail.initialize();
+
+ cxn = sail.getConnection();
+
+ }
+
+ @Override
+ public void shutdown() {
+
+ if (cxn != null) {
+
+ try {
+
+ cxn.close();
+
+ } catch (SailException e) {
+
+ log.error(e, e);
+
+ } finally {
+
+ cxn = null;
+
+ }
+
+ }
+
+ if (sail != null) {
+
+ try {
+
+ sail.shutDown();
+
+ } catch (SailException e) {
+
+ log.error(e,e);
+
+ } finally {
+
+ sail = null;
+
+ }
+
+ }
+
+ }
+
+ @Override
+ public boolean handleArg(final AtomicInteger i, final String[] args) {
+ if (super.handleArg(i, args)) {
+ return true;
+ }
+// final String arg = args[i.get()];
+// if (arg.equals("-bufferMode")) {
+// final String s = args[i.incrementAndGet()];
+// bufferModeOverride = BufferMode.valueOf(s);
+// } else if (arg.equals("-namespace")) {
+// final String s = args[i.incrementAndGet()];
+// namespaceOverride = s;
+// } else {
+// return false;
+// }
+ return false;
+ }
+
+ @Override
+ public void report(final StringBuilder sb) {
+ // NOP
+ }
+
+ } // class SAILOptionData
+
+ @Override
+ protected SAILOptionData newOptionData() {
+
+ return new SAILOptionData();
+
+ }
+
+ @Override
+ protected IGASEngine newGASEngine() {
+
+ return new SAILGASEngine(getOptionData().nthreads);
+
+ }
+
+ @Override
+ protected void loadFiles() throws Exception {
+
+ final SAILOptionData opt = getOptionData();
+ final String[] resources = opt.loadSet.toArray(new String[0]);
+
+ boolean ok = false;
+ SailConnection cxn = null;
+ try {
+ cxn = opt.cxn;
+ new GASUtil().loadGraph(cxn, null/* fallback */, resources);
+ cxn.commit();
+ ok = true;
+ } finally {
+ if (cxn != null) {
+ if (!ok)
+ cxn.rollback();
+ // Note: using the same connection, so don't close here.
+// cxn.close();
+ }
+ }
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ protected SAILOptionData getOptionData() {
+
+ return (SAILOptionData) super.getOptionData();
+
+ }
+
+ @Override
+ protected IGraphAccessor newGraphAccessor() {
+
+ return new SAILGraphAccessor(getOptionData().cxn,
+ false/* includeInferred */, new Resource[0]/* defaultContext */);
+
+ }
+
+ /**
+ * Performance testing harness.
+ *
+ * @see GASRunnerBase#GASRunnerBase(String[])
+ */
+ @SuppressWarnings("rawtypes")
+ public static void main(final String[] args) throws Exception {
+
+ new SAILGASRunner(args).call();
+
+ }
+
+}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/GASRunnerBase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/GASRunnerBase.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/GASRunnerBase.java 2013-09-10 15:31:04 UTC (rev 7393)
@@ -0,0 +1,464 @@
+package com.bigdata.rdf.graph.impl.util;
+
+import java.lang.reflect.Constructor;
+import java.util.LinkedHashSet;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.log4j.Logger;
+import org.openrdf.model.Value;
+
+import com.bigdata.rdf.graph.IGASContext;
+import com.bigdata.rdf.graph.IGASEngine;
+import com.bigdata.rdf.graph.IGASProgram;
+import com.bigdata.rdf.graph.IGASScheduler;
+import com.bigdata.rdf.graph.IGASSchedulerImpl;
+import com.bigdata.rdf.graph.IGASState;
+import com.bigdata.rdf.graph.IGASStats;
+import com.bigdata.rdf.graph.IGraphAccessor;
+import com.bigdata.rdf.graph.impl.GASEngine;
+import com.bigdata.rdf.graph.impl.GASState;
+import com.bigdata.rdf.graph.impl.GASStats;
+
+/**
+ * Base class for running performance tests.
+ *
+ * @param <VS>
+ * The generic type for the per-vertex state. This is scoped to the
+ * computation of the {@link IGASProgram}.
+ * @param <ES>
+ * The generic type for the per-edge state. This is scoped to the
+ * computation of the {@link IGASProgram}.
+ * @param <ST>
+ * The generic type for the SUM. This is often directly related to
+ * the generic type for the per-edge state, but that is not always
+ * true. The SUM type is scoped to the GATHER + SUM operation (NOT
+ * the computation).
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ *
+ * TODO Do we need a different driver if the algorithm always visits all
+ * vertices? For such algorithms, we just run them once per graph
+ * (unless the graph is dynamic).
+ */
+//* @param <GE>
+//* The generic type for the {@link IGASEngine}.
+//* @param <BE>
+//* The generic type for the backend implementation.
+
+public abstract class GASRunnerBase<VS, ES, ST> implements
+ Callable<IGASStats> {
+
+ private static final Logger log = Logger.getLogger(GASRunnerBase.class);
+
+ /**
+ * Configured options for the {@link GASRunnerBase}.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+ protected class OptionData {
+ /**
+ * The seed used for the random number generator (default
+ * {@value #seed}).
+ */
+ public long seed = 217L;
+ /**
+ * Random number generator used for sampling the starting vertices. Set
+ * by {@link #init()}.
+ */
+ public Random r = null;
+ /**
+ * The #of random starting vertices to use.
+ */
+ public int nsamples = 100;
+ /**
+ * The #of threads to use for GATHER and SCATTER operators.
+ */
+ public int nthreads = 4;
+ /**
+ * The analytic class to be executed.
+ */
+ public Class<IGASProgram<VS, ES, ST>> analyticClass;
+ /**
+ * The {@link IGASSchedulerImpl} class to use.
+ *
+ * TODO Override or always? If always, then where to get the default?
+ */
+ public Class<IGASSchedulerImpl> schedulerClassOverride;
+
+ /** Set of files to load (may be empty). */
+ public final LinkedHashSet<String> loadSet = new LinkedHashSet<String>();
+
+ /** The name of the implementation specific configuration file. */
+ public String propertyFile;
+
+ protected OptionData() {
+
+ }
+
+ /**
+ * Initialize any resources, including the connection to the backend.
+ */
+ public void init() throws Exception {
+
+ // Set up the random number generator.
+ this.r = new Random(seed);
+
+ }
+
+ /**
+ * Shutdown any resources, including the connection to the backend.
+ * <p>
+ * Note: This method must be safe. It may be called if {@link #init()}
+ * fails. It may be called more than once.
+ */
+ public void shutdown() {
+
+ }
+
+ /**
+ * Return <code>true</code> iff one or more arguments can be parsed
+ * starting at the specified index.
+ *
+ * @param i
+ * The index into the arguments.
+ * @param args
+ * The arguments.
+ * @return <code>true</code> iff any arguments were recognized.
+ */
+ public boolean handleArg(final AtomicInteger i, final String[] args) {
+
+ return false;
+
+ }
+
+ /**
+ * Print the optional message on stderr, print the usage information on
+ * stderr, and then force the program to exit with the given status code.
+ *
+ * @param status
+ * The status code.
+ * @param msg
+ * The optional message
+ */
+ public void usage(final int status, final String msg) {
+
+ if (msg != null) {
+
+ System.err.println(msg);
+
+ }
+
+ System.err.println("[options] analyticClass propertyFile");
+
+ System.exit(status);
+
+ }
+
+ /**
+ * Extension hook for reporting at the end of the test run.
+ *
+ * @param sb A buffer into which more information may be appended.
+ */
+ public void report(final StringBuilder sb) {
+
+ // NOP
+
+ }
+
+ } // class OptionData
+
+ /**
+ * The configuration metadata for the run.
+ */
+ private final OptionData opt;
+
+ /**
+ * Factory for the {@link OptionData}.
+ */
+ abstract protected OptionData newOptionData();
+
+ /**
+ * The {@link OptionData} for the run.
+ */
+ protected OptionData getOptionData() {
+
+ return opt;
+
+ }
+
+ /**
+ * Factory for the {@link IGASEngine}.
+ */
+ abstract protected IGASEngine newGASEngine();
+
+ /**
+ * Load files into the backend if they can not be assumed to already exist
+ * (a typical pattern is that files are loaded into an empty KB instance,
+ * but not loaded into a pre-existing one).
+ *
+ * @throws Exception
+ */
+ abstract protected void loadFiles() throws Exception;
+
+ /**
+ * Run a GAS analytic against some data set.
+ *
+ * @param args
+ * USAGE:<br/>
+ * <code>(options) analyticClass propertyFile</code>
+ * <p>
+ * <i>Where:</i>
+ * <dl>
+ * <dt>propertyFile</dt>
+ * <dd>The implementation specific property file or other type of
+ * configuration file.</dd>
+ * </dl>
+ * and <i>options</i> are any of:
+ * <dl>
+ * <dt>-nthreads</dt>
+ * <dd>The #of threads which will be used for GATHER and SCATTER
+ * operations.</dd>
+ * <dt>-nsamples</dt>
+ * <dd>The #of random sample starting vertices that will be
+ * selected. The algorithm will be run ONCE for EACH sampled
+ * vertex.</dd>
+ * <dt>-seed</dt>
+ * <dd>The seed for the random number generator (default is
+ * <code>217L</code>).</dd>
+ * <dt>-schedulerClass</dt>
+ * <dd>Override the default {@link IGASScheduler}. Class must
+ * implement {@link IGASSchedulerImpl}.</dd>
+ * <dt>-load</dt>
+ * <dd>Loads the named resource IFF the KB is empty (or does not
+ * exist) at the time this utility is executed. This option may
+ * appear multiple times. The resources will be searched for as
+ * URLs, on the CLASSPATH, and in the file system.</dd>
+ * </dl>
+ * @throws ClassNotFoundException
+ */
+ public GASRunnerBase(final String[] args) throws ClassNotFoundException {
+
+ final OptionData opt = newOptionData();
+
+ /*
+ * Handle all arguments starting with "-". These should appear before
+ * any non-option arguments to the program.
+ */
+ final AtomicInteger i = new AtomicInteger(0);
+ while (i.get() < args.length) {
+ final String arg = args[i.get()];
+ if (arg.startsWith("-")) {
+ if (arg.equals("-seed")) {
+ opt.seed = Long.valueOf(args[i.incrementAndGet()]);
+ } else if (arg.equals("-nsamples")) {
+ final String s = args[i.incrementAndGet()];
+ opt.nsamples = Integer.valueOf(s);
+ if (opt.nsamples <= 0) {
+ opt.usage(1/* status */,
+ "-nsamples must be positive, not: " + s);
+ }
+ } else if (arg.equals("-nthreads")) {
+ final String s = args[i.incrementAndGet()];
+ opt.nthreads = Integer.valueOf(s);
+ if (opt.nthreads < 0) {
+ opt.usage(1/* status */,
+ "-nthreads must be non-negative, not: " + s);
+ }
+ } else if (arg.equals("-schedulerClass")) {
+ final String s = args[i.incrementAndGet()];
+ opt.schedulerClassOverride = (Class<IGASSchedulerImpl>) Class.forName(s);
+ } else if (arg.equals("-load")) {
+ final String s = args[i.incrementAndGet()];
+ opt.loadSet.add(s);
+ } else {
+ if (!opt.handleArg(i, args)) {
+ opt.usage(1/* status */, "Unknown argument: " + arg);
+ }
+ }
+ } else {
+ break;
+ }
+ i.incrementAndGet();
+ }
+
+ /*
+ * Check for the remaining (required) argument(s).
+ */
+ final int nremaining = args.length - i.get();
+ if (nremaining != 2) {
+ /*
+ * There are either too many or too few arguments remaining.
+ */
+ opt.usage(1/* status */, nremaining < 2 ? "Too few arguments."
+ : "Too many arguments.");
+ }
+
+ /*
+ * The analytic to be executed.
+ */
+ {
+
+ final String s = args[i.getAndIncrement()];
+
+ opt.analyticClass = (Class<IGASProgram<VS, ES, ST>>) Class
+ .forName(s);
+
+ }
+
+ /*
+ * Property file.
+ */
+ opt.propertyFile = args[i.getAndIncrement()];
+
+ this.opt = opt; // assign options.
+
+ }
+
+ /**
+ * Return the object used to access the as-configured graph.
+ */
+ abstract protected IGraphAccessor newGraphAccessor();
+
+ /**
+ * Return an instance of the {@link IGASProgram} to be evaluated.
+ */
+ protected IGASProgram<VS, ES, ST> newGASProgram() {
+
+ final Class<IGASProgram<VS, ES, ST>> cls = (Class<IGASProgram<VS, ES, ST>>)opt.analyticClass;
+
+ try {
+
+ final Constructor<IGASProgram<VS, ES, ST>> ctor = cls
+ .getConstructor(new Class[] {});
+
+ final IGASProgram<VS, ES, ST> gasProgram = ctor
+ .newInstance(new Object[] {});
+
+ return gasProgram;
+
+ } catch (Exception e) {
+
+ throw new RuntimeException(e);
+
+ }
+
+ }
+
+ /**
+ * Run the test.
+ * <p>
+ * This provides a safe pattern for either loading data into a temporary
+ * journal, which is then destroyed, or using an existing journal and
+ * optionally loading in some data set. When we load the data ourselves,
+ * the journal is destroyed afterwards; when the journal is pre-existing,
+ * we neither load the data nor destroy the journal. This has to do with
+ * the effective BufferMode (if transient), whether the file is specified,
+ * and whether a temporary file is created (CREATE_TEMP_FILE). If we do
+ * our own file create when the effective buffer mode is non-transient,
+ * then we can get all of this information.
+ */
+ @Override
+ final public IGASStats call() throws Exception {
+
+ try {
+
+ // initialize backend / connection to backend.
+ opt.init();
+
+ // Load data sets
+ loadFiles();
+
+ // Run GAS program.
+ return runAnalytic();
+
+ } finally {
+
+ // Shutdown backend / connection to backend.
+ opt.shutdown();
+
+ }
+
+ }
+
+ /**
+ * Run the analytic.
+ *
+ * @return The performance statistics for the run.
+ *
+ * @throws Exception
+ */
+ final protected IGASStats runAnalytic() throws Exception {
+
+ final IGASEngine gasEngine = newGASEngine();
+
+ try {
+
+ if (opt.schedulerClassOverride != null) {
+
+ ((GASEngine) gasEngine)
+ .setSchedulerClass(opt.schedulerClassOverride);
+
+ }
+
+ final IGASProgram<VS, ES, ST> gasProgram = newGASProgram();
+
+ final IGraphAccessor graphAccessor = newGraphAccessor();
+
+ final IGASContext<VS, ES, ST> gasContext = gasEngine.newGASContext(
+ graphAccessor, gasProgram);
+
+ final IGASState<VS, ES, ST> gasState = gasContext.getGASState();
+
+ final VertexDistribution dist = graphAccessor.getDistribution(opt.r);
+
+ final Value[] samples = dist.getWeightedSample(opt.nsamples);
+
+ final IGASStats total = new GASStats();
+
+ for (int i = 0; i < samples.length; i++) {
+
+ final Value startingVertex = samples[i];
+
+ gasState.init(startingVertex);
+
+ final IGASStats stats = (IGASStats) gasContext.call();
+
+ total.add(stats);
+
+ if (log.isInfoEnabled()) {
+ log.info("Run complete: vertex[" + i + "] of "
+ + samples.length + " : startingVertex="
+ + startingVertex + ", stats(sample)=" + stats);
+ }
+
+ }
+
+ // Total over all sampled vertices.
+ final StringBuilder sb = new StringBuilder();
+ sb.append("TOTAL");
+ sb.append(": analytic=" + gasProgram.getClass().getSimpleName());
+ sb.append(", nseed=" + opt.seed);
+ sb.append(", nsamples=" + opt.nsamples);
+ sb.append(", nthreads=" + opt.nthreads);
+ sb.append(", scheduler=" + ((GASState<VS, ES, ST>)gasState).getScheduler().getClass().getSimpleName());
+ sb.append(", gasEngine=" + gasEngine.getClass().getSimpleName());
+ opt.report(sb); // extension hook.
+ // performance results.
+ sb.append(", stats(total)=" + total);
+ System.out.println(sb);
+
+ return total;
+
+ } finally {
+
+ gasEngine.shutdownNow();
+
+ }
+
+ }
+
+}
Copied: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASRunner.java (from rev 7382, branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASRunner.java)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASRunner.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASRunner.java 2013-09-10 15:31:04 UTC (rev 7393)
@@ -0,0 +1,544 @@
+package com.bigdata.rdf.graph.impl.bd;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.lang.reflect.Constructor;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.log4j.Logger;
+import org.openrdf.sail.SailConnection;
+
+import com.bigdata.Banner;
+import com.bigdata.journal.BufferMode;
+import com.bigdata.journal.ITx;
+import com.bigdata.journal.Journal;
+import com.bigdata.rdf.graph.IGASProgram;
+import com.bigdata.rdf.graph.IGraphAccessor;
+import com.bigdata.rdf.graph.impl.bd.BigdataGASEngine.BigdataGraphAccessor;
+import com.bigdata.rdf.graph.impl.util.GASRunnerBase;
+import com.bigdata.rdf.graph.util.GASUtil;
+import com.bigdata.rdf.sail.BigdataSail;
+import com.bigdata.rdf.store.AbstractTripleStore;
+
+/**
+ * Class for running performance tests against the bigdata backend.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class BigdataGASRunner<VS, ES, ST> extends GASRunnerBase<VS, ES, ST> {
+
+ private static final Logger log = Logger.getLogger(BigdataGASRunner.class);
+
+ /**
+ * Configured options for the {@link BigdataGASRunner}.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+ protected class BigdataOptionData extends
+ GASRunnerBase<VS, ES, ST>.OptionData {
+
+ /**
+ * The {@link BufferMode} to use.
+ */
+ private BufferMode bufferModeOverride = null; // override only.
+
+ /**
+ * The namespace of the bigdata KB instance.
+ */
+ private String namespaceOverride = "kb";
+
+ /**
+ * The as-configured {@link Properties} for the {@link Journal}.
+ */
+ private Properties properties;
+
+ /**
+ * The effective KB name. This is set by consulting
+ * {@link #namespaceOverride} and the as configured {@link #properties}.
+ */
+ private String namespace;
+
+ /**
+ * The backend.
+ *
+ * TODO Could start NSS and use SPARQL UPDATE "LOAD" to load the data.
+ * That exposes the SPARQL end point for other purposes during the test.
+ * Is this useful? It could also let us run the GASEngine on a remote
+ * service (submit a callable to an HA server or define a REST API for
+ * submitting these GAS algorithms).
+ */
+ private Journal jnl;
+
+ /**
+ * <code>true</code> iff the backend is temporary (created on a
+ * temporary backing file). Temporary backends are destroyed in
+ * {@link #shutdown()}.
+ */
+ private boolean isTemporary;
+
+ /**
+ * Set to <code>true</code> iff we determine that the data needs to be
+ * loaded (e.g., the KB was empty, so we have to load the data sets).
+ *
+ * TODO Rename for clearer semantics. Basically, do we have to load the
+ * data files or can we assume that the data are already loaded. Lift
+ * into base class?
+ */
+ private boolean newKB = false;
+
+ /**
+ * The #of edges in the KB instance and <code>-1</code> until set by
+ * {@link BigdataGASRunner#loadFiles()}.
+ */
+ private long nedges = -1;
+
+ protected BigdataOptionData() {
+
+ super();
+
+ }
+
+ private Properties getProperties(final String resource) throws IOException {
+
+ if (log.isInfoEnabled())
+ log.info("Reading properties: " + resource);
+
+ InputStream is = null;
+ try {
+
+ // try the classpath
+ is = getClass().getResourceAsStream(resource);
+
+ if (is == null) {
+
+ // try file system.
+ final File file = new File(resource);
+
+ if (file.exists()) {
+
+ is = new FileInputStream(file);
+
+ } else {
+
+ throw new IOException("Could not locate resource: "
+ + resource);
+
+ }
+
+ }
+
+ /*
+ * Obtain a buffered reader on the input stream.
+ */
+
+ final Properties properties = new Properties();
+
+ final Reader reader = new BufferedReader(new InputStreamReader(is));
+
+ try {
+
+ properties.load(reader);
+
+ } finally {
+
+ try {
+
+ reader.close();
+
+ } catch (Throwable t) {
+
+ log.error(t);
+
+ }
+
+ }
+
+ /*
+ * Allow override of select options from the command line.
+ */
+ {
+ final String[] overrides = new String[] {
+ // Journal options.
+ com.bigdata.journal.Options.FILE,
+// // RDFParserOptions.
+// RDFParserOptions.Options.DATATYPE_HANDLING,
+// RDFParserOptions.Options.PRESERVE_BNODE_IDS,
+// RDFParserOptions.Options.STOP_AT_FIRST_ERROR,
+// RDFParserOptions.Options.VERIFY_DATA,
+// // DataLoader options.
+// DataLoader.Options.BUFFER_CAPACITY,
+// DataLoader.Options.CLOSURE,
+// DataLoader.Options.COMMIT,
+// DataLoader.Options.FLUSH,
+ };
+ for (String s : overrides) {
+ if (System.getProperty(s) != null) {
+ // Override/set from the environment.
+ final String v = System.getProperty(s);
+ if (log.isInfoEnabled())
+ log.info("OVERRIDE:: Using: " + s + "=" + v);
+ properties.setProperty(s, v);
+ }
+ }
+ }
+
+ return properties;
+
+ } finally {
+
+ if (is != null) {
+
+ try {
+
+ ...
[truncated message content]
From: <tho...@us...> - 2013-09-12 13:54:05
Revision: 7396
http://bigdata.svn.sourceforge.net/bigdata/?rev=7396&view=rev
Author: thompsonbry
Date: 2013-09-12 13:53:51 +0000 (Thu, 12 Sep 2013)
Log Message:
-----------
Fix for #741 (ctc-striterators module must be independent, Apache 2 license).
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/CopyOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/EndOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/AbstractSubqueryOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryResultIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/EmptyChunkMessage.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HashIndexOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HashJoinOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/IDistinctFilter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/IHashJoinUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/JVMDistinctFilter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/NestedLoopJoinOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/SolutionSetHashJoinOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/mutation/InsertOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ZeroLengthPathOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/DropOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/HTreeDistinctBindingSetsOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/ISolutionSet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/JVMDistinctBindingSetsOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemoryGroupByOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/ProjectionOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/EntryScanIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ISimpleIndexAccess.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/AbstractHTree.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/ChunkConsumerIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/EmptyCloseableIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/IAsynchronousIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/IBindingSetAccessPath.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/IMultiSourceCloseableIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/ThickCloseableIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/ndx/ClientIndexView.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/proxy/RemoteAsynchronousIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/stream/Stream.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/Appender.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/ChunkedArraysIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/ChunkedResolvingIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/Chunkerator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/ClosableEmptyIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/ClosableSingleItemIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/CloseableIteratorWrapper.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/Dechunkerator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/Filter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/IChunkedIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/IStriterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/MergeFilter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/PushbackIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/Resolver.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/Striterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/ap/TestPredicateAccessPath.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/bset/TestConditionalRoutingOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/bset/TestCopyBindingSets.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/AbstractQueryEngineTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_Slice.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_SortOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/fed/TestRemoteAccessPath.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/join/AbstractHashJoinUtilityTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/join/HashIndexOpTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/relation/accesspath/TestThickCloseableIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/java/com/bigdata/gom/gpo/GPO.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/java/com/bigdata/gom/gpo/LinkSet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/java/com/bigdata/gom/om/IObjectManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/java/com/bigdata/gom/om/NanoSparqlObjectManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/java/com/bigdata/gom/om/ObjectManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/java/com/bigdata/gom/om/ObjectMgrModel.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/samples/com/bigdata/gom/samples/Example1.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/samples/com/bigdata/gom/samples/Example2.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/test/com/bigdata/gom/Example1.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/filter/NativeDistinctFilter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/ChunkedMaterializationIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/ChunkedMaterializationOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/DataSetJoin.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/InlineMaterializeOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/update/AbstractAddRemoveStatementsOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InferenceChangeLogReporter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/encoder/SolutionSetStreamDecoder.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/encoder/SolutionSetStreamEncoder.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/SolutionSetStatserator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTConstructIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/DGExpander.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SampleServiceFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchInSearchServiceFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchServiceFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ValuesServiceFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCall.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ssets/ISolutionSetManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ssets/SolutionSetManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/DistinctSPOIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPORelation.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataValueIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/TripleStoreUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/AbstractJoinGraphTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestFullTextIndex.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestSubjectCentricFullTextIndex.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestMatch.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestModelsEqual.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/BigdataNativeMockServiceFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/OpenrdfNativeMockServiceFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/ssets/TestSolutionSetManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOKeyOrder.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2Sesame2BindingSetIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2SesameIteration.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/src/java/cutthecrap/utils/striterators/IFilter.java
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/src/java/cutthecrap/utils/striterators/SingleValueIterator.java
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/src/java/cutthecrap/utils/striterators/Striterator.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/LEGAL/apache-license-2_0.txt
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/LEGAL/junit-license.html
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/LEGAL/log4j-license.txt
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/NOTICE
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/build.properties
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/build.xml
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/lib/
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/lib/junit-3.8.1.jar
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/lib/junit-ext-1.1-b3-dev.jar
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/lib/log4j-1.2.15.jar
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/src/java/cutthecrap/utils/striterators/ICloseable.java
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/src/java/cutthecrap/utils/striterators/ICloseableIterator.java
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/ICloseable.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/ICloseableIterator.java
Property Changed:
----------------
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -61,8 +61,9 @@
import com.bigdata.striterator.Chunkerator;
import com.bigdata.striterator.CloseableIteratorWrapper;
import com.bigdata.striterator.IChunkedIterator;
-import com.bigdata.striterator.ICloseableIterator;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* The evaluation context for the operator (NOT serializable).
*
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -49,11 +49,11 @@
import com.bigdata.rdf.sparql.ast.IGroupMemberNode;
import com.bigdata.relation.accesspath.IBlockingBuffer;
import com.bigdata.striterator.CloseableIteratorWrapper;
-import com.bigdata.striterator.ICloseableIterator;
import cutthecrap.utils.striterators.EmptyIterator;
import cutthecrap.utils.striterators.Expander;
import cutthecrap.utils.striterators.Filter;
+import cutthecrap.utils.striterators.ICloseableIterator;
import cutthecrap.utils.striterators.SingleValueIterator;
import cutthecrap.utils.striterators.Striterator;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -42,8 +42,9 @@
import com.bigdata.rdf.sparql.ast.ssets.ISolutionSetManager;
import com.bigdata.rdf.store.AbstractTripleStore;
import com.bigdata.striterator.Chunkerator;
-import com.bigdata.striterator.ICloseableIterator;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* Utility class for {@link INamedSolutionSetRef}s.
*
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -40,8 +40,9 @@
import com.bigdata.bop.PipelineOp;
import com.bigdata.bop.engine.BOpStats;
import com.bigdata.relation.accesspath.IBlockingBuffer;
-import com.bigdata.striterator.ICloseableIterator;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* An operator for conditional routing of binding sets in a pipeline. The
* operator will copy binding sets either to the default sink (if a condition is
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/CopyOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/CopyOp.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/CopyOp.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -43,8 +43,9 @@
import com.bigdata.bop.engine.IChunkAccessor;
import com.bigdata.relation.accesspath.IBlockingBuffer;
import com.bigdata.relation.accesspath.ThickAsynchronousIterator;
-import com.bigdata.striterator.ICloseableIterator;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* This operator copies its source to its sink(s). Specializations exist which are
* used to feed the the initial set of intermediate results into a pipeline (
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/EndOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/EndOp.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/EndOp.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -11,8 +11,9 @@
import com.bigdata.bop.PipelineOp;
import com.bigdata.bop.solutions.SliceOp;
import com.bigdata.relation.accesspath.IBlockingBuffer;
-import com.bigdata.striterator.ICloseableIterator;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* A operator which may be used at the end of query pipelines when there is a
* requirement to marshal solutions back to the query controller but no
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/AbstractSubqueryOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/AbstractSubqueryOp.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/AbstractSubqueryOp.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -44,9 +44,10 @@
import com.bigdata.bop.bset.Tee;
import com.bigdata.bop.engine.IRunningQuery;
import com.bigdata.bop.engine.QueryEngine;
-import com.bigdata.striterator.ICloseableIterator;
import com.bigdata.util.concurrent.LatchedExecutor;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* Executes each of the operands as a subquery. The operands are evaluated in
* the order given and with the annotated parallelism. Each subquery is run as a
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -53,8 +53,8 @@
import com.bigdata.bop.join.JoinTypeEnum;
import com.bigdata.bop.join.NamedSolutionSetStats;
import com.bigdata.relation.accesspath.IBlockingBuffer;
-import com.bigdata.striterator.ICloseableIterator;
+import cutthecrap.utils.striterators.ICloseableIterator;
import cutthecrap.utils.striterators.SingleValueIterator;
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -53,8 +53,8 @@
import com.bigdata.bop.join.JoinTypeEnum;
import com.bigdata.bop.join.NamedSolutionSetStats;
import com.bigdata.relation.accesspath.IBlockingBuffer;
-import com.bigdata.striterator.ICloseableIterator;
+import cutthecrap.utils.striterators.ICloseableIterator;
import cutthecrap.utils.striterators.SingleValueIterator;
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -69,10 +69,10 @@
import com.bigdata.relation.accesspath.IBuffer;
import com.bigdata.relation.accesspath.UnsyncLocalOutputBuffer;
import com.bigdata.striterator.ChunkedArrayIterator;
-import com.bigdata.striterator.ICloseableIterator;
import com.bigdata.util.InnerCause;
import com.bigdata.util.concurrent.LatchedExecutor;
+import cutthecrap.utils.striterators.ICloseableIterator;
import cutthecrap.utils.striterators.SingleValueIterator;
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -53,8 +53,9 @@
import com.bigdata.rdf.internal.impl.literal.XSDBooleanIV;
import com.bigdata.rdf.model.BigdataLiteral;
import com.bigdata.relation.accesspath.IBlockingBuffer;
-import com.bigdata.striterator.ICloseableIterator;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* Pipelined join with subquery.
* <p>
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -67,11 +67,12 @@
import com.bigdata.rwstore.sector.IMemoryManager;
import com.bigdata.rwstore.sector.MemoryManager;
import com.bigdata.service.IBigdataFederation;
-import com.bigdata.striterator.ICloseableIterator;
import com.bigdata.util.InnerCause;
import com.bigdata.util.concurrent.Haltable;
import com.bigdata.util.concurrent.IHaltable;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* Abstract base class for various {@link IRunningQuery} implementations. The
* purpose of this class is to isolate aspects common to different designs for
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -62,10 +62,11 @@
import com.bigdata.relation.accesspath.MultiSourceSequentialCloseableIterator;
import com.bigdata.rwstore.sector.IMemoryManager;
import com.bigdata.service.IBigdataFederation;
-import com.bigdata.striterator.ICloseableIterator;
import com.bigdata.util.concurrent.Memoizer;
import com.sun.jini.thread.Executor;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* {@link IRunningQuery} implementation based on the assignment of
* {@link IChunkMessage}(s) to an operator task. Operators (other than those
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -33,8 +33,9 @@
import com.bigdata.bop.IBindingSet;
import com.bigdata.relation.accesspath.BlockingBuffer;
import com.bigdata.striterator.IChunkedIterator;
-import com.bigdata.striterator.ICloseableIterator;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* API providing a variety of ways to access chunks of data (data are typically
* elements or binding sets).
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -38,9 +38,10 @@
import com.bigdata.btree.ILocalBTreeView;
import com.bigdata.journal.IIndexManager;
import com.bigdata.service.IBigdataFederation;
-import com.bigdata.striterator.ICloseableIterator;
import com.bigdata.util.concurrent.IHaltable;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* Non-Remote interface exposing a limited set of the state of an executing
* query.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java 2013-09-12 12:35:35 UTC (rev 7395)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java 2013-09-12 13:53:51 UTC (rev 7396)
@@ -7,8 +7,9 @@
import com.bigdata.bop.IBindingSet;
import com.bigdata.bop.fed.FederatedRunningQuery;
import com.bigdata.relation.accesspath.ThickCloseableIterator;
-import com.bigdata.striterator.ICloseableIterator;
+import cutthecrap.utils.striterators.ICloseableIterator;
+
/**
* A chunk of intermediate results which are ready to be consumed by some
* {@link BOp} in a specific query.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/sr...
[truncated message content] |
|
From: <tho...@us...> - 2013-09-12 15:58:47
|
Revision: 7397
http://bigdata.svn.sourceforge.net/bigdata/?rev=7397&view=rev
Author: thompsonbry
Date: 2013-09-12 15:58:40 +0000 (Thu, 12 Sep 2013)
Log Message:
-----------
Created a bigdata-commons module. This module is currently empty. We should move some of the common classes into it and then remove them from the other modules. The main classes that are being duplicated are DaemonThreadFactory and CAT. However, we could move much more into this commons package.
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/LEGAL/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/LEGAL/apache-license-2_0.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/NOTICE
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/build.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/build.xml
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/lib/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/src/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/src/java/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/src/java/com/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/src/java/com/bigdata/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/src/java/com/bigdata/commons/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/src/test/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/src/test/com/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/src/test/com/bigdata/
branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/src/test/com/bigdata/commons/
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/LEGAL/apache-license-2_0.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/LEGAL/apache-license-2_0.txt (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/LEGAL/apache-license-2_0.txt 2013-09-12 15:58:40 UTC (rev 7397)
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/NOTICE
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/NOTICE (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/NOTICE 2013-09-12 15:58:40 UTC (rev 7397)
@@ -0,0 +1,18 @@
+
+ Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+----
+This product includes software developed by The Apache Software Foundation (http://www.apache.org/).
+License: http://www.apache.org/licenses/LICENSE-2.0
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/build.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/build.properties (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/build.properties 2013-09-12 15:58:40 UTC (rev 7397)
@@ -0,0 +1,50 @@
+# The root of the checked out svn source. This assumes that you have checked
+# out the trunk so that all modules were automatically checked out and are in
+# direct subdirectories of the directory containing this properties file and
+# the ant build.xml file.
+bigdata-commons.dir=.
+# Where the generated files will be written.
+# build.dir/classes [compiled classes and misc resources for classpath]
+# build.dir/docs [generated documentation].
+# build.dir/docs/api [generated javadoc].
+# build.dir/lib [bundled libraries copied here for easier deployment]
+# build.dir/src [source code copied here for releases]
+build.dir=ant-build
+
+##
+# javac options
+##
+
+# debug=on|off
+javac.debug=on
+# debuglevel=lines,vars,source (or any combination thereof).
+javac.debuglevel=lines,vars,source
+javac.verbose=off
+#javac.target=1.6
+#javac.source=1.6
+javac.encoding=Cp1252
+
+# Where to find the library dependencies (junit)
+build.lib.dir=lib
+
+# Where to write the output from the unit tests.
+test.results.dir=${build.dir}/test-results
+
+##
+# Properties for creating a release.
+##
+
+# Where the releases will be written.
+release.dir=ant-release
+
+# The build version.
+build.ver=0.1.0
+
+# Set true to do a snapshot build. This changes the value of ${version} to
+# include the date.
+#snapshot=true
+snapshot=
+
+# The javadoc build may be disabled by leaving this property undefined. The
+# javadoc target will not be executed unless this property is defined (its
+# value does not matter).
+javadoc=
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/build.xml (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-commons/build.xml 2013-09-12 15:58:40 UTC (rev 7397)
@@ -0,0 +1,161 @@
+<project name="bigdata-commons" default="all" basedir=".">
+
+ <property file="build.properties" />
+
+ <path id="build.classpath">
+ <fileset dir="${build.lib.dir}">
+ <include name="**/*.jar" />
+ </fileset>
+ </path>
+
+ <path id="test.classpath">
+ <pathelement location="${build.dir}/classes" />
+ <pathelement location="${build.dir}/test" />
+ <fileset dir="${build.lib.dir}">
+ <include name="junit*.jar" />
+ <include name="log4j*.jar" />
+ </fileset>
+ </path>
+
+ <path id="runtime.classpath">
+ <pathelement location="${build.dir}/classes" />
+ <path refid="build.classpath" />
+ </path>
+
+ <target name="clean" description="cleans everything in [build.dir], but not the releases.">
+ <delete dir="${build.dir}" />
+ </target>
+
+ <target name="prepare">
+ <!-- setup ${version} for regular or snapshot. -->
+ <tstamp>
+ <format property="today" pattern="ddMMyy" locale="en,US" />
+ </tstamp>
+ <condition property="version"
+ value="bigdata-commons-${build.ver}-${today}"
+ else="bigdata-commons-${build.ver}">
+ <istrue value="${snapshot}" />
+ </condition>
+ <!--<echo message="today=${today}"/>-->
+ <echo message="version=${version}" />
+ <!-- create directories. -->
+ <mkdir dir="${build.dir}" />
+ <mkdir dir="${build.dir}/classes" />
+ <mkdir dir="${build.dir}/docs" />
+ <mkdir dir="${build.dir}/lib" />
+ <mkdir dir="${build.dir}/test" />
+ <mkdir dir="${build.dir}/test-results" />
+ </target>
+
+ <target name="compile" depends="prepare">
+ <mkdir dir="${build.dir}" />
+ <javac destdir="${build.dir}/classes" classpathref="build.classpath" debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" encoding="${javac.encoding}">
+ <!-- note: must also specify -bootclasspath and -extdirs when cross-compiling -->
+ <!-- target="${javac.target}" source="${javac.source}" -->
+ <src path="${bigdata-commons.dir}/src/java" />
+ <!-- Do not include the unit tests @todo conditionally include?
+ <src path="${bigdata-commons.dir}/src/test"/>
+ -->
+ </javac>
+ <!-- copy resources. -->
+ <copy toDir="${build.dir}/classes">
+ <fileset dir="${bigdata-commons.dir}/src/java">
+ <exclude name="**/*.java" />
+ <exclude name="**/package.html" />
+ </fileset>
+ </copy>
+ </target>
+
+ <target name="jar" depends="compile" description="Generates the jar.">
+ <jar destfile="${build.dir}/${version}.jar">
+ <fileset dir="${build.dir}/classes" />
+ <manifest>
+ </manifest>
+ </jar>
+ </target>
+
+ <target name="test" depends="clean, compile">
+ <javac destdir="${build.dir}/test" debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" encoding="${javac.encoding}">
+ <classpath refid="test.classpath" />
+ <src path="${bigdata-commons.dir}/src/test" />
+ </javac>
+ <!-- copy resources. -->
+ <copy toDir="${build.dir}/test">
+ <fileset dir="${bigdata-commons.dir}/src/test">
+ <exclude name="**/*.java" />
+ </fileset>
+ </copy>
+ <junit printsummary="on" haltonfailure="yes" fork="no" dir="${build.dir}/classes" timeout="60000">
+ <classpath refid="test.classpath" />
+ <formatter type="xml" />
+ <!-- Individual test suite to run when -DtestName is set -->
+ <!-- to the fully-qualified name of the test suite -->
+ <!-- ant -DtestName=com.bigdata.cache.TestAll test -->
+ <test name="${testName}" todir="${test.results.dir}" if="testName" />
+ <!-- Test suites to run when -DtestName is not set -->
+ <test name="cutthecrap.utils.striterators.TestAll" todir="${test.results.dir}" unless="testName" />
+ </junit>
+ <!-- Generate an HTML report. -->
+ <junitreport todir="${build.dir}">
+ <fileset dir="${test.results.dir}">
+ <include name="TEST-*.xml" />
+ </fileset>
+ <report format="frames" todir="${test.results.dir}/report" />
+ </junitreport>
+ </target>
+
+ <target name="javadoc" depends="prepare" if="javadoc">
+ <mkdir dir="${build.dir}/docs/api" />
+ <!-- overview="....overview.html" -->
+ <javadoc destdir="${build.dir}/docs/api" defaultexcludes="yes" author="true" version="true" use="true" windowtitle="bigdata-commons" classpathref="build.classpath">
+ <arg value="-J-Xmx1000m" />
+ <packageset dir="${bigdata-commons.dir}/src/java" />
+ <doctitle>
+ <![CDATA[<h1>bigdata-commons</h1>]]></doctitle>
+ <bottom>
+ <![CDATA[
+<i>
+
+ Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.<p>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at<p>
+
+ http://www.apache.org/licenses/LICENSE-2.0<p>
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an 'AS IS' BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.<p>
+
+ <hr>
+ This product includes software developed by The Apache Software Foundation (http://www.apache.org/).
+ License: http://www.apache.org/licenses/LICENSE-2.0
+</i>]]></bottom>
+ <!--tag name="FIXME" scope="all" description="FIXME:"/-->
+ <link href="http://java.sun.com/j2se/1.5.0/docs/api/" />
+ </javadoc>
+ </target>
+
+ <target name="release" depends="prepare"
+ description="Create complete source tar file.">
+ <tar destfile="${bigdata-commons.dir}/DIST.${version}.tgz" compression="gzip">
+ <tarfileset dir="${bigdata-commons.dir}" prefix="${version}">
+ <include name="build.properties" />
+ <include name="build.xml" />
+ <include name="NOTICE" />
+ <include name="**/LEGAL/*" />
+ <include name="src/**" />
+ <exclude name="classes/**" />
+ <exclude name="ant-build/**" />
+ <exclude name="lib/**" />
+ </tarfileset>
+ </tar>
+ </target>
+
+ <target name="all" depends="clean, test, jar, javadoc, release"
+ description="clean, test, jar, javadoc, release" />
+
+</project>
|
|
From: <tho...@us...> - 2013-09-12 16:02:13
|
Revision: 7398
http://bigdata.svn.sourceforge.net/bigdata/?rev=7398&view=rev
Author: thompsonbry
Date: 2013-09-12 16:02:04 +0000 (Thu, 12 Sep 2013)
Log Message:
-----------
Decoupled the Apache2 module for the GAS Engine. I had to duplicate two classes (DaemonThreadFactory and Sesame2BigdataIterator).
There is currently a problem running ant for the bigdata-gas module under Eclipse. In part, this may have to do with the path to the smallGraph.ttl file, but it also appears to be confounded with the JDK6/JDK7 external executable configurations in Eclipse.
See #629 (Graph mining API)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/build.xml
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASStats.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/STScheduler.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/ManagedArray.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/ManagedIntArray.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GASUtil.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/impl/sail/AbstractSailGraphTestCase.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/lib/ctc-striterators-0.1.0.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/lib/slf4j-api-1.6.1.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/lib/slf4j-log4j12-1.6.1.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/DaemonThreadFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/Sesame2BigdataIterator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/TLScheduler.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/TLScheduler2.java
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/TLScheduler.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/TLScheduler2.java
Property Changed:
----------------
branches/BIGDATA_RELEASE_1_3_0/ctc-striterators/
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/build.xml 2013-09-12 15:58:40 UTC (rev 7397)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/build.xml 2013-09-12 16:02:04 UTC (rev 7398)
@@ -12,8 +12,10 @@
<pathelement location="${build.dir}/classes" />
<pathelement location="${build.dir}/test" />
<fileset dir="${build.lib.dir}">
- <include name="junit*.jar" />
- <include name="log4j*.jar" />
+<!-- <include name="junit*.jar" />
+ <include name="log4j*.jar" />
+ <include name="ctc-striterators*.jar" />
+ <include name="openrdf*.jar" /> -->
</fileset>
</path>
@@ -74,7 +76,7 @@
</jar>
</target>
- <target name="test" depends="compile">
+ <target name="test" depends="clean, compile">
<javac destdir="${build.dir}/test" debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" encoding="${javac.encoding}">
<classpath refid="test.classpath" />
<src path="${bigdata-gas.dir}/src/test" />
@@ -93,7 +95,7 @@
<!-- ant -DtestName=com.bigdata.cache.TestAll junit -->
<test name="${testName}" todir="${test.results.dir}" if="testName" />
<!-- Test suites to run when -DtestName is not set -->
- <test name="com.bigdata.gas.TestAll" todir="${test.results.dir}" unless="testName" />
+ <test name="com.bigdata.rdf.graph.TestAll" todir="${test.results.dir}" unless="testName" />
</junit>
<!-- Generate an HTML report. -->
<junitreport todir="${build.dir}">
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/lib/ctc-striterators-0.1.0.jar
===================================================================
(Binary files differ)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/lib/ctc-striterators-0.1.0.jar
___________________________________________________________________
Added: svn:mime-type
+ application/octet-stream
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/lib/slf4j-api-1.6.1.jar
===================================================================
(Binary files differ)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/lib/slf4j-api-1.6.1.jar
___________________________________________________________________
Added: svn:mime-type
+ application/octet-stream
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/lib/slf4j-log4j12-1.6.1.jar
===================================================================
(Binary files differ)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/lib/slf4j-log4j12-1.6.1.jar
___________________________________________________________________
Added: svn:mime-type
+ application/octet-stream
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/DaemonThreadFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/DaemonThreadFactory.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/DaemonThreadFactory.java 2013-09-12 16:02:04 UTC (rev 7398)
@@ -0,0 +1,115 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.rdf.graph.impl;
+
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+
+/**
+ * A thread factory that configures the thread as a daemon thread.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ * @version $Id: DaemonThreadFactory.java 5824 2011-12-29 20:52:02Z thompsonbry $
+ */
+/*
+ * Note: This is a clone of the same-named class in the bigdata module. The
+ * clone exists to have it under the Apache 2 license without requiring the
+ * creation of a bigdata-commons module.
+ */
+class DaemonThreadFactory implements ThreadFactory {
+
+ final private ThreadFactory delegate;
+ final private String basename; // MAY be null.
+ private int counter = 0; // used iff basename was given.
+
+ private static ThreadFactory _default = new DaemonThreadFactory();
+
+ /**
+ * Returns an instance based on {@link Executors#defaultThreadFactory()}
+ * that configures the thread for daemon mode.
+ */
+ final public static ThreadFactory defaultThreadFactory() {
+
+ return _default;
+
+ }
+
+ /**
+ * Uses {@link Executors#defaultThreadFactory()} as the delegate.
+ */
+ public DaemonThreadFactory() {
+
+ this( Executors.defaultThreadFactory(), null/*basename*/ );
+
+ }
+
+ public DaemonThreadFactory(String basename) {
+
+ this(Executors.defaultThreadFactory(), basename);
+
+ }
+
+ /**
+ * Uses the specified delegate {@link ThreadFactory}.
+ *
+ * @param delegate
+ * The delegate thread factory that is responsible for creating
+ * the threads.
+ * @param basename
+ * Optional prefix that will be used to assign names to the
+ * generated threads.
+ */
+ public DaemonThreadFactory(final ThreadFactory delegate,
+ final String basename) {
+
+ if (delegate == null)
+ throw new IllegalArgumentException();
+
+ this.delegate = delegate;
+
+ this.basename = basename;
+
+ }
+
+ public Thread newThread(final Runnable r) {
+
+ final Thread t = delegate.newThread( r );
+
+ if (basename != null) {
+
+ counter++;
+
+ t.setName(basename + counter);
+
+ }
+
+ t.setDaemon(true);
+
+// System.err.println("new thread: "+t.getName()+", id="+t.getId());
+
+ return t;
+
+ }
+
+}
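For illustration, a minimal usage sketch for this factory (not part of the
commit; the pool size and the "gasEngine-" name prefix are assumptions, and
the caller must live in the same package since the class is package-private):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    // Threads created by this pool are daemons named gasEngine-1,
    // gasEngine-2, ..., so a pool that is never shut down will not
    // keep the JVM alive on exit.
    final ExecutorService pool = Executors.newFixedThreadPool(4,
            new DaemonThreadFactory("gasEngine-"));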
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASEngine.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASEngine.java 2013-09-12 15:58:40 UTC (rev 7397)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASEngine.java 2013-09-12 16:02:04 UTC (rev 7398)
@@ -37,7 +37,6 @@
import com.bigdata.rdf.graph.IStaticFrontier;
import com.bigdata.rdf.graph.impl.frontier.StaticFrontier2;
import com.bigdata.rdf.graph.impl.scheduler.CHMScheduler;
-import com.bigdata.util.concurrent.DaemonThreadFactory;
/**
* {@link IGASEngine} for dynamic activation of vertices. This implementation
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASStats.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASStats.java 2013-09-12 15:58:40 UTC (rev 7397)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASStats.java 2013-09-12 16:02:04 UTC (rev 7398)
@@ -16,8 +16,8 @@
package com.bigdata.rdf.graph.impl;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
-import com.bigdata.counters.CAT;
import com.bigdata.rdf.graph.IGASStats;
import com.bigdata.rdf.graph.util.GASUtil;
@@ -28,10 +28,10 @@
*/
public class GASStats implements IGASStats {
- private final CAT nrounds = new CAT();
- private final CAT frontierSize = new CAT();
- private final CAT nedges = new CAT();
- private final CAT elapsedNanos = new CAT();
+ private final AtomicLong nrounds = new AtomicLong();
+ private final AtomicLong frontierSize = new AtomicLong();
+ private final AtomicLong nedges = new AtomicLong();
+ private final AtomicLong elapsedNanos = new AtomicLong();
/* (non-Javadoc)
* @see com.bigdata.rdf.graph.impl.IFOO#add(long, long, long)
@@ -40,13 +40,13 @@
public void add(final long frontierSize, final long nedges,
final long elapsedNanos) {
- this.nrounds.increment();
+ this.nrounds.incrementAndGet();
- this.frontierSize.add(frontierSize);
+ this.frontierSize.addAndGet(frontierSize);
- this.nedges.add(nedges);
+ this.nedges.addAndGet(nedges);
- this.elapsedNanos.add(elapsedNanos);
+ this.elapsedNanos.addAndGet(elapsedNanos);
}
@@ -56,13 +56,13 @@
@Override
public void add(final IGASStats o) {
- nrounds.add(o.getNRounds());
+ nrounds.addAndGet(o.getNRounds());
- frontierSize.add(o.getFrontierSize());
+ frontierSize.addAndGet(o.getFrontierSize());
- nedges.add(o.getNEdges());
+ nedges.addAndGet(o.getNEdges());
- elapsedNanos.add(o.getElapsedNanos());
+ elapsedNanos.addAndGet(o.getElapsedNanos());
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASEngine.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASEngine.java 2013-09-12 15:58:40 UTC (rev 7397)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASEngine.java 2013-09-12 16:02:04 UTC (rev 7398)
@@ -35,7 +35,6 @@
import com.bigdata.rdf.graph.IGraphAccessor;
import com.bigdata.rdf.graph.impl.GASEngine;
import com.bigdata.rdf.graph.impl.util.VertexDistribution;
-import com.bigdata.rdf.sail.Sesame2BigdataIterator;
import cutthecrap.utils.striterators.EmptyIterator;
import cutthecrap.utils.striterators.IStriterator;
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/Sesame2BigdataIterator.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/Sesame2BigdataIterator.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/Sesame2BigdataIterator.java 2013-09-12 16:02:04 UTC (rev 7398)
@@ -0,0 +1,119 @@
+/**
+ Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+/*
+ * Created on Sep 4, 2008
+ */
+
+package com.bigdata.rdf.graph.impl.sail;
+
+import java.util.NoSuchElementException;
+
+import info.aduna.iteration.CloseableIteration;
+
+import cutthecrap.utils.striterators.ICloseableIterator;
+
+/**
+ * Class aligns a Sesame 2 {@link CloseableIteration} with a bigdata
+ * {@link ICloseableIterator}.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ * @version $Id: Bigdata2SesameIteration.java 2265 2009-10-26 12:51:06Z
+ * thompsonbry $
+ * @param <T>
+ * The generic type of the visited elements.
+ * @param <E>
+ * The generic type of the exceptions thrown by the Sesame 2
+ * {@link CloseableIteration}.
+ */
+/*
+ * Note: This is a clone of the same-named class in the bigdata-rdf module. The
+ * clone exists to have it under the Apache 2 license without going through a
+ * large relayering of the dependencies.
+ */
+class Sesame2BigdataIterator<T, E extends Exception> implements
+ ICloseableIterator<T> {
+
+ private final CloseableIteration<? extends T,E> src;
+
+ private volatile boolean open = true;
+
+ public Sesame2BigdataIterator(final CloseableIteration<? extends T,E> src) {
+
+ if (src == null)
+ throw new IllegalArgumentException();
+
+ this.src = src;
+
+ }
+
+ public void close() {
+
+ if (open) {
+ open = false;
+ try {
+ src.close();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ }
+
+ public boolean hasNext() {
+
+ try {
+
+ if (open && src.hasNext())
+ return true;
+
+ close();
+
+ return false;
+
+ } catch(Exception e) {
+ throw new RuntimeException(e);
+ }
+
+ }
+
+ public T next() {
+
+ if (!hasNext()) {
+ throw new NoSuchElementException();
+ }
+
+ try {
+ return src.next();
+ } catch(Exception e) {
+ throw new RuntimeException(e);
+ }
+
+ }
+
+ public void remove() {
+
+ if(!open)
+ throw new IllegalStateException();
+
+ try {
+ src.remove();
+ } catch(Exception e) {
+ throw new RuntimeException(e);
+ }
+
+ }
+
+}
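For illustration, a minimal usage sketch for this adapter (not part of the
commit; it assumes a Sesame 2 RepositoryConnection named cxn, and a caller in
the same package since the class is package-private):

    import org.openrdf.model.Statement;
    import org.openrdf.repository.RepositoryException;

    // Wrap the Sesame 2 iteration; the adapter converts checked exceptions
    // into RuntimeException and closes the source at most once.
    final ICloseableIterator<Statement> itr =
            new Sesame2BigdataIterator<Statement, RepositoryException>(
                    cxn.getStatements(null/* s */, null/* p */, null/* o */,
                            true/* includeInferred */));
    try {
        while (itr.hasNext()) {
            System.err.println(itr.next());
        }
    } finally {
        itr.close(); // closes the underlying CloseableIteration.
    }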
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/STScheduler.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/STScheduler.java 2013-09-12 15:58:40 UTC (rev 7397)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/STScheduler.java 2013-09-12 16:02:04 UTC (rev 7398)
@@ -33,8 +33,8 @@
/**
* The scheduled vertices.
- */// Note: package private. Exposed to TLScheduler.
- /*private*/ final Set<Value> vertices;
+ */
+ private final Set<Value> vertices;
private final boolean sortFrontier;
public STScheduler(final GASEngine gasEngine) {
@@ -44,6 +44,24 @@
}
+ /**
+ * The #of vertices in the frontier.
+ */
+ public int size() {
+
+ return vertices.size();
+
+ }
+
+ /**
+ * The backing collection.
+ */
+ public Set<Value> getVertices() {
+
+ return vertices;
+
+ }
+
@Override
public void schedule(final Value v) {
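The new public accessors replace direct access to the formerly package-private
vertices field. A sketch of the corresponding call-site change in the
relocated TLScheduler (hypothetical lines, not part of this diff):

    // before: compiled only because TLScheduler lived in the same package.
    GASImplUtil.compactAndSort(t.vertices, t.tmp);
    // after: works across module boundaries via the public accessor.
    GASImplUtil.compactAndSort(t.getVertices(), t.tmp);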
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/TLScheduler.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/TLScheduler.java 2013-09-12 15:58:40 UTC (rev 7397)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/TLScheduler.java 2013-09-12 16:02:04 UTC (rev 7398)
@@ -1,275 +0,0 @@
-/**
- Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-package com.bigdata.rdf.graph.impl.scheduler;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
-import org.openrdf.model.Value;
-
-import com.bigdata.rdf.graph.IGASScheduler;
-import com.bigdata.rdf.graph.IGASSchedulerImpl;
-import com.bigdata.rdf.graph.IStaticFrontier;
-import com.bigdata.rdf.graph.impl.GASEngine;
-import com.bigdata.rdf.graph.impl.bd.MergeSortIterator;
-import com.bigdata.rdf.graph.impl.util.GASImplUtil;
-import com.bigdata.rdf.graph.impl.util.IArraySlice;
-import com.bigdata.rdf.graph.impl.util.ManagedArray;
-import com.bigdata.rdf.graph.util.GASUtil;
-
-/**
- * This scheduler uses thread-local buffers ({@link LinkedHashSet}) to track the
- * distinct vertices scheduled by each execution thread. After the computation
- * round, those per-thread segments of the frontier are combined into a single
- * global, compact, and ordered frontier. To maximize the parallel activity, the
- * per-thread frontiers are sorted using N threads (one per segment). Finally,
- * the frontier segments are combined using a {@link MergeSortIterator} - this
- * is a sequential step with a linear cost in the size of the frontier.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- *
- * TODO Discard if dominated by {@link TLScheduler2}.
- */
-public class TLScheduler implements IGASSchedulerImpl {
-
- /**
- * Class bundles a reusable, extensible array for sorting the thread-local
- * frontier.
- *
- * @author <a href="mailto:tho...@us...">Bryan
- * Thompson</a>
- */
- private static class MySTScheduler extends STScheduler {
-
- /**
- * This is used to sort the thread-local frontier (that is, the frontier
- * for a single thread). The backing array will grow as necessary and is
- * reused in each round.
- * <P>
- * Note: The schedule (for each thread) is using a set - see the
- * {@link STScheduler} base class. This means that the schedule (for
- * each thread) is compact, but not ordered. We need to use (and re-use)
- * an array to order that compact per-thread schedule. The compact
- * per-thread schedules are then combined into a single compact frontier
- * for the new round.
- */
- private final ManagedArray<Value> tmp;
-
- public MySTScheduler(final GASEngine gasEngine) {
-
- super(gasEngine);
-
- tmp = new ManagedArray<Value>(Value.class, 64);
-
- }
-
- } // class MySTScheduler
-
- private final GASEngine gasEngine;
- private final int nthreads;
- private final ConcurrentHashMap<Long/* threadId */, MySTScheduler> map;
-
- public TLScheduler(final GASEngine gasEngine) {
-
- this.gasEngine = gasEngine;
-
- this.nthreads = gasEngine.getNThreads();
-
- this.map = new ConcurrentHashMap<Long, MySTScheduler>(
- nthreads/* initialCapacity */, .75f/* loadFactor */, nthreads);
-
- }
-
- private IGASScheduler threadLocalScheduler() {
-
- final Long id = Thread.currentThread().getId();
-
- MySTScheduler s = map.get(id);
-
- if (s == null) {
-
- final IGASScheduler old = map.putIfAbsent(id, s = new MySTScheduler(
- gasEngine));
-
- if (old != null) {
-
- /*
- * We should not have a key collision since this is based on the
- * threadId.
- */
-
- throw new AssertionError();
-
- }
-
- }
-
- return s;
-
- }
-
- @Override
- public void schedule(final Value v) {
-
- threadLocalScheduler().schedule(v);
-
- }
-
- @Override
- public void clear() {
-
- /*
- * Clear the per-thread maps, but do not discard. They will be reused in
- * the next round.
- *
- * Note: This is a big cost. Simply clearing [map] results in much less
- * time and less GC.
- */
-// for (STScheduler s : map.values()) {
-//
-// s.clear();
-//
-// }
- map.clear();
- }
-
- @Override
- public void compactFrontier(final IStaticFrontier frontier) {
-
- /*
- * Extract a sorted, compact frontier from each thread local frontier.
- */
- @SuppressWarnings("unchecked")
- final IArraySlice<Value>[] frontiers = new IArraySlice[nthreads];
-
- int nsources = 0;
- int nvertices = 0;
- {
- final List<Callable<IArraySlice<Value>>> tasks = new ArrayList<Callable<IArraySlice<Value>>>(
- nthreads);
-
- for (MySTScheduler s : map.values()) {
- final MySTScheduler t = s;
- tasks.add(new Callable<IArraySlice<Value>>() {
- @Override
- public IArraySlice<Value> call() throws Exception {
- return GASImplUtil.compactAndSort(t.vertices, t.tmp);
- }
- });
-
- }
- // invokeAll() - futures will be done() before it returns.
- final List<Future<IArraySlice<Value>>> futures;
- try {
- futures = gasEngine.getGASThreadPool().invokeAll(tasks);
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
-
- for (Future<IArraySlice<Value>> f : futures) {
-
- try {
- final IArraySlice<Value> b = frontiers[nsources] = f.get();
- nvertices += b.len();
- nsources++;
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- } catch (ExecutionException e) {
- throw new RuntimeException(e);
- }
-
- }
- }
-
- if (nvertices == 0) {
-
- /*
- * The new frontier is empty.
- */
-
- frontier.resetFrontier(0/* minCapacity */, false/* sortFrontier */,
- GASUtil.EMPTY_VERTICES_ITERATOR);
-
- return;
-
- }
-
- if (nsources > nthreads) {
-
- /*
- * nsources could be LT nthreads if we have a very small frontier,
- * but it should never be GTE nthreads.
- */
-
- throw new AssertionError("nsources=" + nsources + ", nthreads="
- + nthreads);
-
- }
-
- /*
- * Now merge sort those arrays and populate the new frontier.
- */
- mergeSortSourcesAndSetFrontier(nsources, nvertices, frontiers, frontier);
-
- }
-
- /**
- * Now merge sort the ordered frontier segments and populate the new
- * frontier.
- *
- * @param nsources
- * The #of frontier segments.
- * @param nvertices
- * The total #of vertices across those segments (may double-count
- * across segments).
- * @param frontiers
- * The ordered, compact frontier segments
- * @param frontier
- * The new frontier to be populated.
- */
- private void mergeSortSourcesAndSetFrontier(final int nsources,
- final int nvertices, final IArraySlice<Value>[] frontiers,
- final IStaticFrontier frontier) {
-
- // wrap Values[] as Iterators.
- @SuppressWarnings("unchecked")
- final Iterator<Value>[] itrs = new Iterator[nsources];
-
- for (int i = 0; i < nsources; i++) {
-
- itrs[i] = frontiers[i].iterator();
-
- }
-
- // merge sort of those iterators.
- final Iterator<Value> itr = new MergeSortIterator(itrs);
-
- /*
- * Note: The merge iterator visits the vertices in the natural order and
- * does not need to be sorted.
- */
- frontier.resetFrontier(nvertices/* minCapacity */,
- false/* sortFrontier */, itr);
-
- }
-
-}
\ No newline at end of file
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/TLScheduler2.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/TLScheduler2.java 2013-09-12 15:58:40 UTC (rev 7397)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/scheduler/TLScheduler2.java 2013-09-12 16:02:04 UTC (rev 7398)
@@ -1,306 +0,0 @@
-/**
- Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-package com.bigdata.rdf.graph.impl.scheduler;
-
-import java.util.ArrayList;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
-import org.apache.log4j.Logger;
-import org.openrdf.model.Value;
-
-import com.bigdata.rdf.graph.IGASScheduler;
-import com.bigdata.rdf.graph.IGASSchedulerImpl;
-import com.bigdata.rdf.graph.IStaticFrontier;
-import com.bigdata.rdf.graph.impl.GASEngine;
-import com.bigdata.rdf.graph.impl.bd.MergeSortIterator;
-import com.bigdata.rdf.graph.impl.frontier.StaticFrontier2;
-import com.bigdata.rdf.graph.impl.util.GASImplUtil;
-import com.bigdata.rdf.graph.impl.util.IArraySlice;
-import com.bigdata.rdf.graph.impl.util.ManagedArray;
-import com.bigdata.rdf.graph.util.GASUtil;
-
-/**
- * This scheduler uses thread-local buffers ({@link LinkedHashSet}) to track the
- * distinct vertices scheduled by each execution thread. After the computation
- * round, those per-thread segments of the frontier are combined into a single
- * global, compact, and ordered frontier. To maximize the parallel activity, the
- * per-thread frontiers are sorted using N threads (one per segment). Finally,
- * the frontier segments are combined using a {@link MergeSortIterator} - this
- * is a sequential step with a linear cost in the size of the frontier.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- */
-public class TLScheduler2 implements IGASSchedulerImpl {
-
- private static final Logger log = Logger.getLogger(TLScheduler2.class);
-
- /**
- * Class bundles a reusable, extensible array for sorting the thread-local
- * frontier.
- *
- * @author <a href="mailto:tho...@us...">Bryan
- * Thompson</a>
- */
- private static class MySTScheduler extends STSche...
[truncated message content] |
|
From: <mar...@us...> - 2013-09-12 17:08:07
|
Revision: 7401
http://bigdata.svn.sourceforge.net/bigdata/?rev=7401&view=rev
Author: martyncutcher
Date: 2013-09-12 17:07:59 +0000 (Thu, 12 Sep 2013)
Log Message:
-----------
Amend HALogNexus to add an accessor increment pattern that guards against file removal; initially used by DumpLogDigests.
Also amend DumpLogDigests to access all joined services to determine the union of the range of HALogs to test.
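For illustration, a sketch of the accessor increment pattern in question
(field and method names here are illustrative only; the actual implementation
is in the HALogWriter/HALogNexus changes below):

    // Each reader/writer increments the count before touching the file and
    // decrements it when done. The file may only be closed (and hence
    // removed) once the count falls back to zero.
    private int m_accessors = 0;

    synchronized void acquireAccessor() {
        m_accessors++;
    }

    synchronized void releaseAccessor() throws IOException {
        if (--m_accessors == 0) {
            close(); // last accessor out: safe to close and allow removal.
        }
    }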
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DumpLogDigests.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-09-12 17:03:57 UTC (rev 7400)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-09-12 17:07:59 UTC (rev 7401)
@@ -563,7 +563,7 @@
/**
* Close the file (does not flush).
- */
+ */
private void close() throws IOException { // Note: caller owns m_stateLock!
try {
if (m_state != null) {
@@ -611,7 +611,7 @@
*
* @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/695">
* HAJournalServer reports "follower" but is in SeekConsensus and is
- * not participating in commits\xA7</a>
+ * not participating in commits</a>
*/
private void remove() throws IOException {
@@ -817,8 +817,8 @@
// One less reader/writer.
--m_accessors;
if (m_accessors == 0) {
- if (haLog.isDebugEnabled())
- haLog.debug("Closing file");
+ if (haLog.isInfoEnabled())
+ haLog.info("Closing file", new StackInfoReport());
/*
* Note: Close the RandomAccessFile rather than the
* FileChannel. Potential fix for leaking open file
@@ -938,7 +938,7 @@
// Note: Must be synchronized for visibility and atomicity!
synchronized (m_state) {
- m_state.m_accessors++;
+ m_state.m_accessors++;
}
@@ -1051,7 +1051,7 @@
*/
synchronized(m_state) {
- if(m_state.m_accessors == 0) {
+ if (m_state.m_accessors == 0) {
/**
* TODO This is a bit of a hack. The problem is that
@@ -1070,9 +1070,9 @@
}
- m_state.close();
+ m_state.close();
+ }
}
- }
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DumpLogDigests.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DumpLogDigests.java 2013-09-12 17:03:57 UTC (rev 7400)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DumpLogDigests.java 2013-09-12 17:07:59 UTC (rev 7401)
@@ -118,99 +118,146 @@
return dump(serviceRoot, DEFAULT_BATCH, DEFAULT_SERVICE_THREADS);
}
- public Iterator<ServiceLogs> dump(final String serviceRoot, final int batchSize, final int serviceThreads) throws IOException, ExecutionException {
- try {
- // wait for zk services to register!
- Thread.sleep(1000);
-
- List<HAGlue> services = services(serviceRoot);
-
- if (services.isEmpty())
- throw new IllegalArgumentException("No services found for " + serviceRoot);
-
- // Start by grabbing a nominal service to pin the logs
- final HAGlue pinner = services.get(0);
-
- final LogDigestParams params = pinner.submit(new PinLogs(), false).get();
-
- if (log.isInfoEnabled())
- log.info("Pinning startCC: " + params.startCC + ", endCC: " + params.endCC + ", last snapshot: " + params.snapshotCC);
-
- /**
- * Now access serviceIDs so that we can use discovery to gain HAGlue interface.
- *
- * Submit all requests for concurrent processing, then add results
- */
- List<Future<List<HALogInfo>>> results = new ArrayList<Future<List<HALogInfo>>>();
- long batchStart = params.startCC;
- long batchEnd = batchStart + batchSize - 1;
- int tasks = 0;
- while (true) {
- if (batchEnd > params.endCC)
- batchEnd = params.endCC;
-
- if (log.isInfoEnabled())
- log.info("Running batch start: " + batchStart + ", end: " + batchEnd + " across " + services);
-
- for (final HAGlue glue : services) {
-
- results.add(glue.submit(new GetLogInfo(batchStart, batchEnd, serviceThreads), false));
-
- tasks++;
- }
-
- if (batchEnd == params.endCC)
- break;
-
- batchStart += batchSize;
- batchEnd += batchSize;
- }
-
- final ArrayList<ServiceLogWait> logs = new ArrayList<ServiceLogWait>();
- for (int t = 0; t < tasks; t++) {
- final int s = t % services.size();
- logs.add(new ServiceLogWait(services.get(s).getServiceUUID().toString(), results.get(t), s, services.size()));
- }
-
- // now submit task to release the pinning transaction and wait for it to complete
- pinner.submit(new UnpinLogs(params.tx), false).get();
-
- // return an Iterator blocking on the Future value of the next source item before
- // creating a return value
- return new Iterator<ServiceLogs>() {
- final Iterator<ServiceLogWait> src = logs.iterator();
-
- @Override
- public boolean hasNext() {
- return src.hasNext();
- }
+ public Iterator<ServiceLogs> dump(final String serviceRoot,
+ final int batchSize, final int serviceThreads) throws IOException,
+ ExecutionException {
+ try {
+ // waiting for zk services to register should no longer be necessary
+ // Thread.sleep(1000);
- @Override
- public ServiceLogs next() {
- final ServiceLogWait data = src.next();
+ // retrieve a list of joined services
+ List<HAGlue> services = services(serviceRoot);
+
+ if (services.isEmpty())
+ throw new IllegalArgumentException("No services found for "
+ + serviceRoot);
+
+ // Retrieve a LogDigestParams for each service with a PinLogs task
+ // Retrieve in sequential order and use a try/finally pattern to
+ // invoke UnpinLogs for everything that was pinned.
+ //
+ // Note that the new accessor pattern is used to increment the
+ // access count in GetLogInfo so this ensures that while concurrent
+ // digest tasks are running the logs will not be removed.
+ final ArrayList<HAGlue> pinners = new ArrayList<HAGlue>();
+ try {
+
+ long startCC = -1;
+ long endCC = -1;
+
+ for (HAGlue pinner : services) {
+ final LogDigestParams params = pinner.submit(new PinLogs(),
+ false).get();
+
+ if (log.isInfoEnabled())
+ log.info("Pinning startCC: " + params.startCC + ", endCC: "
+ + params.endCC + ", last snapshot: "
+ + params.snapshotCC);
- try {
- // This will block on the future.get()
- return new ServiceLogs(data.service, data.waitlogInfos.get(), data.item, data.batch);
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- } catch (ExecutionException e) {
- throw new RuntimeException(e);
+ if (params.startCC != -1) { // there are logs available
+ if (startCC == -1 || startCC > params.startCC){
+ startCC = params.startCC;
+ }
+ if (endCC < params.endCC){
+ endCC = params.endCC;
+ }
}
+
+ // only added if PinLogs is successful
+ pinners.add(pinner); // add as pinner to be unpinned later
}
+
+ /**
+ * Now access serviceIDs so that we can use discovery to gain
+ * HAGlue interface.
+ *
+ * Submit all requests for concurrent processing, then add
+ * results
+ */
+ List<Future<List<HALogInfo>>> results = new ArrayList<Future<List<HALogInfo>>>();
+ long batchStart = startCC;
+ long batchEnd = batchStart + batchSize - 1;
+ int tasks = 0;
+ while (true) {
+ if (batchEnd > endCC)
+ batchEnd = endCC;
- @Override
- public void remove() {
- throw new UnsupportedOperationException();
+ if (log.isInfoEnabled())
+ log.info("Running batch start: " + batchStart
+ + ", end: " + batchEnd + " across " + services);
+
+ for (final HAGlue glue : services) {
+
+ results.add(glue.submit(new GetLogInfo(batchStart,
+ batchEnd, serviceThreads), false));
+
+ tasks++;
+ }
+
+ if (batchEnd == endCC)
+ break;
+
+ batchStart += batchSize;
+ batchEnd += batchSize;
}
- };
-
+
+ final ArrayList<ServiceLogWait> logs = new ArrayList<ServiceLogWait>();
+ for (int t = 0; t < tasks; t++) {
+ final int s = t % services.size();
+ logs.add(new ServiceLogWait(services.get(s)
+ .getServiceUUID().toString(), results.get(t), s,
+ services.size()));
+ }
+
+ // return an Iterator blocking on the Future value of the next
+ // source item before
+ // creating a return value
+ return new Iterator<ServiceLogs>() {
+ final Iterator<ServiceLogWait> src = logs.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return src.hasNext();
+ }
+
+ @Override
+ public ServiceLogs next() {
+ final ServiceLogWait data = src.next();
+
+ try {
+ // This will block on the future.get()
+ return new ServiceLogs(data.service,
+ data.waitlogInfos.get(), data.item,
+ data.batch);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ } catch (ExecutionException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ } finally {
+ for (final HAGlue pinner : pinners) {
+ try {
+ pinner.submit(new UnpinLogs(), false);
+ } catch (Throwable t) {
+ log.error("Problem submitting UnpinLogs", t);
+ }
+ }
+
+ }
} catch (InterruptedException e) {
throw new RuntimeException(e);
} catch (KeeperException e) {
throw new RuntimeException(e);
}
- }
+ }
/**
* LogDigestParams with PinLogs and UnpinLogs tasks ensure that
@@ -226,13 +273,11 @@
*/
@SuppressWarnings("serial")
static public class LogDigestParams implements Serializable {
- final public long tx;
final public long startCC;
final public long endCC;
final public long snapshotCC;
- LogDigestParams(final long tx, final long startCC, final long endCC, long sscc) {
- this.tx = tx;
+ LogDigestParams(final long startCC, final long endCC, long sscc) {
this.startCC = startCC;
this.endCC = endCC;
this.snapshotCC = sscc;
@@ -246,10 +291,6 @@
public LogDigestParams call() throws Exception {
final HAJournal ha = (HAJournal) this.getIndexManager();
- final ITransactionService ts = ha.getTransactionService();
- final long relTime = ts.getReleaseTime();
- final long tx = ts.newTx(relTime+1);
-
final HALogNexus nexus = ha.getHALogNexus();
Iterator<IHALogRecord> logs = nexus.getHALogs();
final long startCC;
@@ -264,28 +305,23 @@
final ISnapshotRecord rec = ssmgr.getNewestSnapshot();
final long sscc = rec != null ? rec.getCommitCounter() : -1;
+ nexus.addAccessor();
+
// return new LogDigestParams(tx, startCC-3, endCC+3); // try asking for more logs than available
- return new LogDigestParams(tx, startCC, endCC, sscc);
+ return new LogDigestParams(startCC, endCC, sscc);
}
}
@SuppressWarnings("serial")
static class UnpinLogs extends IndexManagerCallable<Void> {
- long tx;
-
- UnpinLogs(long tx) {
- this.tx = tx;
- }
-
- @Override
+
+ @Override
public Void call() throws Exception {
final HAJournal ha = (HAJournal) this.getIndexManager();
- final ITransactionService ts = ha.getTransactionService();
+ ha.getHALogNexus().releaseAccessor();
- ts.abort(tx);
-
return null;
}
@@ -322,66 +358,70 @@
HAJournal ha = (HAJournal) this.getIndexManager();
final HALogNexus nexus = ha.getHALogNexus();
- nexus.protectDigest(startCC);
+ nexus.addAccessor();
try {
- long openCC = nexus.getCommitCounter();
- log.warn("Open Commit Counter: " + openCC + ", startCC: " + startCC + ", endCC: " + endCC);
-
- /**
- * Submit each computation as task to pooled executor service - say maximum of
- * five threads
- */
- final ThreadPoolExecutor es = (ThreadPoolExecutor) Executors
- .newFixedThreadPool(serviceThreads);
+ long openCC = nexus.getCommitCounter();
+ log.warn("Open Commit Counter: " + openCC + ", startCC: "
+ + startCC + ", endCC: " + endCC);
- final List<Future<Void>> results = new ArrayList<Future<Void>>();
-
- for (long cc = startCC; cc <= endCC; cc++) {
- final long cur = cc;
-
- final Future<Void> res = es.submit(new Callable<Void>() {
- @Override
- public Void call() throws Exception {
- try {
- final File file = nexus.getHALogFile(cur);
-
- log.warn("Found log file: " + file.getName());
-
- // compute file digest
- final IHALogReader r = nexus.getReader(cur);
-
- final MessageDigest digest = MessageDigest.getInstance("MD5");
-
- r.computeDigest(digest);
-
- infos.add(new HALogInfo(cur, r.isLive(), digest.digest()));
- } catch (FileNotFoundException fnf) {
- // permitted
- infos.add(new HALogInfo(cur, false, null /*digest*/));
- } catch (Throwable t) {
- log.warn("Unexpected error", t);
-
- // FIXME: what to do here?
- infos.add(new HALogInfo(cur, false, "ERROR".getBytes()));
+ /**
+ * Submit each computation as task to pooled executor service -
+ * say maximum of five threads
+ */
+ final ThreadPoolExecutor es = (ThreadPoolExecutor) Executors
+ .newFixedThreadPool(serviceThreads);
+
+ final List<Future<Void>> results = new ArrayList<Future<Void>>();
+
+ for (long cc = startCC; cc <= endCC; cc++) {
+ final long cur = cc;
+
+ final Future<Void> res = es.submit(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ try {
+ final File file = nexus.getHALogFile(cur);
+
+ log.warn("Found log file: " + file.getName());
+
+ // compute file digest
+ final IHALogReader r = nexus.getReader(cur);
+
+ final MessageDigest digest = MessageDigest
+ .getInstance("MD5");
+
+ r.computeDigest(digest);
+
+ infos.add(new HALogInfo(cur, r.isLive(), digest
+ .digest()));
+ } catch (FileNotFoundException fnf) {
+ // permitted
+ infos.add(new HALogInfo(cur, false, null /* digest */));
+ } catch (Throwable t) {
+ log.warn("Unexpected error", t);
+
+ // FIXME: what to do here?
+ infos.add(new HALogInfo(cur, false, "ERROR"
+ .getBytes()));
+ }
+
+ return null;
}
-
- return null;
- }
-
- });
-
- results.add(res);
- }
-
- for (Future<Void> res : results) {
- res.get();
- }
-
- es.shutdown();
-
- return new ArrayList<HALogInfo>(infos);
+
+ });
+
+ results.add(res);
+ }
+
+ for (Future<Void> res : results) {
+ res.get();
+ }
+
+ es.shutdown();
+
+ return new ArrayList<HALogInfo>(infos);
} finally {
- nexus.releaseProtectDigest();
+ nexus.releaseAccessor();
}
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-09-12 17:03:57 UTC (rev 7400)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-09-12 17:07:59 UTC (rev 7401)
@@ -3696,7 +3696,7 @@
}); // runWithBarrierLock()
if (haLog.isInfoEnabled())
- haLog.info("TRANSITION", new RuntimeException());
+ haLog.info("TRANSITION", new StackInfoReport());
// Transition to RunMet.
enterRunState(new RunMetTask(token, leaderId));
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2013-09-12 17:03:57 UTC (rev 7400)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2013-09-12 17:07:59 UTC (rev 7401)
@@ -32,6 +32,7 @@
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
@@ -120,7 +121,7 @@
* Set to protect log files against deletion while a digest is
* computed. This is checked by deleteHALogs.
*/
- private final AtomicLong digestLog = new AtomicLong(-1L);
+ private final AtomicInteger logAccessors = new AtomicInteger();
/**
* Filter visits all HALog files <strong>except</strong> the current HALog
@@ -819,15 +820,21 @@
* Protects logs from removal while a digest is being computed
* @param earliestDigest
*/
- void protectDigest(final long earliestDigest) {
- digestLog.set(earliestDigest);
+ void addAccessor() {
+ if (logAccessors.incrementAndGet() == 1) {
+ if (log.isInfoEnabled())
+ log.info("Access protection added");
+ }
}
/**
* Releases current protection against log removal
*/
- void releaseProtectDigest() {
- digestLog.set(-1L);
+ void releaseAccessor() {
+ if (logAccessors.decrementAndGet() == 0) {
+ if (log.isInfoEnabled())
+ log.info("Access protection removed");
+ }
}
/**
@@ -848,19 +855,13 @@
final Iterator<IHALogRecord> itr = getHALogs();
- while(itr.hasNext()) {
+ while(itr.hasNext() && logAccessors.get() == 0) {
final IHALogRecord r = itr.next();
final long closingCommitCounter = r.getCommitCounter();
- final boolean deleteFile;
- if (closingCommitCounter < earliestRetainedSnapshotCommitCounter) {
- // now check if protected by the digestLog field (set to -1 if not active)
- deleteFile = digestLog.get() == -1 || closingCommitCounter < digestLog.get();
- } else {
- deleteFile = false;
- }
+ final boolean deleteFile = closingCommitCounter < earliestRetainedSnapshotCommitCounter;
if (!deleteFile) {
|
|
From: <tho...@us...> - 2013-09-12 21:01:05
|
Revision: 7402
http://bigdata.svn.sourceforge.net/bigdata/?rev=7402&view=rev
Author: thompsonbry
Date: 2013-09-12 21:00:56 +0000 (Thu, 12 Sep 2013)
Log Message:
-----------
Added a very simple graph model for GAS graph mining. It uses a collection of vertices and linked hash sets for the in-edges, out-edges, and property values. This passes the basic "TestGather". I have written an extension to load data into this. It is ready for testing against a larger data set. The purpose is to replace the memory sail, which is way too slow when loading the data, and to provide a basis for understanding the overhead associated with graph traversal versus object decoding, as a precursor to an exploration of column-wise indexing for graph mining.
See #629 (Graph Mining API)
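As a quick orientation, the sketch below shows how the new RAMGraph might be populated and traversed. The URIs are invented for illustration, but the RAMGraph methods (getValueFactory(), add(), outEdges()) are the ones added in this commit:

    import java.util.Iterator;

    import org.openrdf.model.Statement;
    import org.openrdf.model.URI;
    import org.openrdf.model.ValueFactory;

    import com.bigdata.rdf.graph.impl.ram.RAMGASEngine.RAMGraph;

    public class RAMGraphDemo {

        public static void main(final String[] args) {

            final RAMGraph g = new RAMGraph();
            final ValueFactory vf = g.getValueFactory();

            // Hypothetical vertices and link type.
            final URI a = vf.createURI("http://example.org/a");
            final URI b = vf.createURI("http://example.org/b");
            final URI knows = vf.createURI("http://example.org/knows");

            // Indexed as an out-edge of [a] and an in-edge of [b].
            g.add(vf.createStatement(a, knows, b));

            final Iterator<Statement> itr = g.outEdges(a);
            while (itr.hasNext())
                System.out.println(itr.next());

        }

    }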
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASRunner.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GASUtil.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASRunner.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASRunner.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphFixture.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphLoader.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/package.html
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GraphLoader.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/SailGraphLoader.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/impl/ram/
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/impl/ram/AbstractRAMGraphTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/impl/ram/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/impl/ram/TestGather.java
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java 2013-09-12 21:00:56 UTC (rev 7402)
@@ -0,0 +1,360 @@
+/**
+ Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+package com.bigdata.rdf.graph.impl.ram;
+
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.openrdf.model.Resource;
+import org.openrdf.model.Statement;
+import org.openrdf.model.URI;
+import org.openrdf.model.Value;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.sail.SailException;
+
+import com.bigdata.rdf.graph.EdgesEnum;
+import com.bigdata.rdf.graph.IGASContext;
+import com.bigdata.rdf.graph.IGASProgram;
+import com.bigdata.rdf.graph.IGraphAccessor;
+import com.bigdata.rdf.graph.impl.GASEngine;
+import com.bigdata.rdf.graph.impl.util.VertexDistribution;
+
+import cutthecrap.utils.striterators.EmptyIterator;
+import cutthecrap.utils.striterators.IStriterator;
+import cutthecrap.utils.striterators.Striterator;
+
+public class RAMGASEngine extends GASEngine {
+
+ public RAMGASEngine(int nthreads) {
+ super(nthreads);
+ }
+
+ /**
+ * Returns <code>false</code>. There is no intrinsic ordering that can
+ * improve access for this implementation.
+ */
+ @Override
+ public boolean getSortFrontier() {
+ return false;
+ }
+
+ /**
+ * A simple RDF graph model suitable for graph mining algorithms.
+ *
+ * TODO This model does not support link weights. It was developed to
+ * provide an implementation without any object encode/decode overhead that
+ * could be used to explore the possible performance of GAS algorithms under
+ * Java.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan
+ * Thompson</a>
+ */
+ static public class RAMGraph {
+
+ private final ValueFactory vf;
+ public ValueFactory getValueFactory() {
+ return vf;
+ }
+
+ /**
+ * From a vertex, we can visit the in-edges, out-edges, or attribute
+ * values. These things are organized into three sets of statements. A
+ * non-thread-safe collection is used to provide the distinct semantics
+ * for those sets and fast traversal. This design precludes the ability
+ * to concurrently modify the graph during graph traversal operations.
+ */
+ static private class Vertex {
+
+ /** The {@link Value} for that {@link Vertex}. */
+ final private Value v;
+ /**
+ * The distinct set of in-edges for this {@link Vertex}.
+ * <p>
+ * The {@link Statement#getObject()} for each {@link Statement} in
+ * this collection will be the {@link #v}.
+ */
+ private Set<Statement> inEdges = null;
+ /**
+ * The distinct set of out-edges for this {@link Vertex}.
+ * <p>
+ * The {@link Statement#getSubject()} for each {@link Statement} in
+ * this collection will be the {@link #v}.
+ */
+ private Set<Statement> outEdges = null;
+ /**
+ * The distinct set of property values for this {@link Vertex}.
+ * <p>
+ * The {@link Statement#getSubject()} for each {@link Statement} in
+ * this collection will be the {@link #v}.
+ * <p>
+ * The {@link Statement#getObject()} for each {@link Statement} in
+ * this collection will be a {@link URI}.
+ */
+ private Set<Statement> attribs = null;
+
+ public Vertex(final Value v) {
+ if (v == null)
+ throw new NullPointerException();
+ this.v = v;
+ }
+ @Override
+ public String toString() {
+ return "Vertex{" + v + ",inEdges=" + getInEdgeCount()
+ + ",outEdges=" + getOutEdgeCount() + ",attribs="
+ + getAttribCount() + "}";
+ }
+
+ private boolean addAttrib(final Statement s) {
+ if (attribs == null) {
+ attribs = new LinkedHashSet<Statement>();
+ }
+ return attribs.add(s);
+ }
+
+ private boolean addOutEdge(final Statement s) {
+ if (outEdges == null) {
+ outEdges = new LinkedHashSet<Statement>();
+ }
+ return outEdges.add(s);
+ }
+
+ private boolean addInEdge(final Statement s) {
+ if (inEdges == null) {
+ inEdges = new LinkedHashSet<Statement>();
+ }
+ return inEdges.add(s);
+ }
+
+ public int getAttribCount() {
+ return attribs == null ? 0 : attribs.size();
+ }
+
+ public int getInEdgeCount() {
+ return inEdges == null ? 0 : inEdges.size();
+ }
+
+ public int getOutEdgeCount() {
+ return outEdges == null ? 0 : outEdges.size();
+ }
+
+ public Iterator<Statement> inEdges() {
+ if (inEdges == null)
+ return EmptyIterator.DEFAULT;
+ return inEdges.iterator();
+ }
+
+ public Iterator<Statement> outEdges() {
+ if (outEdges == null)
+ return EmptyIterator.DEFAULT;
+ return outEdges.iterator();
+ }
+
+ public Iterator<Statement> attribs() {
+ if (attribs == null)
+ return EmptyIterator.DEFAULT;
+ return attribs.iterator();
+ }
+
+ }
+
+ /**
+ * The vertices.
+ */
+ private final ConcurrentMap<Value,Vertex> vertices;
+
+ public RAMGraph() {
+
+ vertices = new ConcurrentHashMap<Value, Vertex>();
+
+ vf = new ValueFactoryImpl();
+
+ }
+
+ /**
+ * Lookup / create a vertex.
+ *
+ * @param x
+ * The {@link Value}.
+ * @param create
+ * when <code>true</code> the {@link Vertex} will be created
+ * if it does not exist.
+ *
+ * @return The {@link Vertex}.
+ */
+ private Vertex get(final Value x, final boolean create) {
+
+ Vertex v = vertices.get(x);
+
+ if (v == null && create) {
+
+ final Vertex oldVal = vertices
+ .putIfAbsent(x, v = new Vertex(x));
+
+ if (oldVal != null) {
+
+ // lost data race.
+ v = oldVal;
+
+ }
+
+ }
+
+ return v;
+
+ }
+
+ public boolean add(final Statement st) {
+
+ final Resource s = st.getSubject();
+
+ final Value o = st.getObject();
+
+ boolean modified = false;
+ if (o instanceof URI) {
+ // Edge
+ modified|=get(s, true/* create */).addOutEdge(st);
+ modified|=get(o, true/* create */).addInEdge(st);
+ } else {
+ // Property value.
+ modified|=get(s, true/* create */).addAttrib(st);
+ }
+ return modified;
+
+ }
+
+ public Iterator<Statement> inEdges(final Value v) {
+ final Vertex x = get(v, false/* create */);
+ if (x == null)
+ return EmptyIterator.DEFAULT;
+ return x.inEdges();
+ }
+
+ public Iterator<Statement> outEdges(final Value v) {
+ final Vertex x = get(v, false/* create */);
+ if (x == null)
+ return EmptyIterator.DEFAULT;
+ return x.outEdges();
+ }
+ public Iterator<Statement> attribs(final Value v) {
+ final Vertex x = get(v, false/* create */);
+ if (x == null)
+ return EmptyIterator.DEFAULT;
+ return x.attribs();
+ }
+
+ } // class RAMGraph
+
+ static public class RAMGraphAccessor implements IGraphAccessor {
+
+ private final RAMGraph g;
+
+ public RAMGraphAccessor(final RAMGraph g) {
+ if (g == null)
+ throw new IllegalArgumentException();
+ this.g = g;
+ }
+
+ @Override
+ public void advanceView() {
+ // NOP
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public Iterator<Statement> getEdges(final IGASContext<?, ?, ?> p,
+ final Value u, final EdgesEnum edges) {
+
+ try {
+ switch (edges) {
+ case NoEdges:
+ return EmptyIterator.DEFAULT;
+ case InEdges:
+ return getEdges(true/* inEdges */, p, u);
+ case OutEdges:
+ return getEdges(false/* inEdges */, p, u);
+ case AllEdges: {
+ final IStriterator a = getEdges(true/* inEdges */, p, u);
+                    final IStriterator b = getEdges(false/* inEdges */, p, u);
+ a.append(b);
+ return a;
+ }
+ default:
+ throw new UnsupportedOperationException(edges.name());
+ }
+ } catch (SailException ex) {
+ throw new RuntimeException(ex);
+ }
+
+ }
+
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ private IStriterator getEdges(final boolean inEdges,
+ final IGASContext<?, ?, ?> ctx, final Value u)
+ throws SailException {
+
+ final URI linkTypeIV = (URI) ctx.getGASProgram().getLinkType();
+ if(linkTypeIV != null) {
+ /*
+ * FIXME RDR: We need to use a union of access paths for link
+ * attributes for the generic SAIL since it does not have the
+ * concept of statements about statements. This will require
+ * applying the access paths that will visit the appropriate
+ * reified triples. This needs to be done for both the standard
+ * path and the POS optimization code path.
+ */
+ throw new UnsupportedOperationException();
+ }
+ final Striterator sitr;
+ if(inEdges) {
+ sitr = new Striterator(g.get(u, false/*create*/).inEdges());
+ } else {
+ sitr = new Striterator(g.get(u, false/*create*/).outEdges());
+ }
+ /*
+ * Optionally wrap the program specified filter.
+ */
+ return ((IGASProgram) ctx.getGASProgram()).constrainFilter(ctx,
+ sitr);
+
+ }
+
+ @Override
+ public VertexDistribution getDistribution(final Random r) {
+
+ final VertexDistribution sample = new VertexDistribution(r);
+
+ for (Value v : g.vertices.keySet()) {
+
+ if (v instanceof Resource) {
+
+ sample.addSample((Resource) v);
+
+ }
+
+ }
+
+ return sample;
+
+ }
+
+ }
+
+}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASRunner.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASRunner.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASRunner.java 2013-09-12 21:00:56 UTC (rev 7402)
@@ -0,0 +1,130 @@
+package com.bigdata.rdf.graph.impl.ram;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.log4j.Logger;
+
+import com.bigdata.rdf.graph.IGASEngine;
+import com.bigdata.rdf.graph.IGraphAccessor;
+import com.bigdata.rdf.graph.impl.ram.RAMGASEngine.RAMGraph;
+import com.bigdata.rdf.graph.impl.ram.RAMGASEngine.RAMGraphAccessor;
+import com.bigdata.rdf.graph.impl.util.GASRunnerBase;
+
+/**
+ * Class for running GAS performance tests against the RAM graph implementation.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class RAMGASRunner<VS, ES, ST> extends GASRunnerBase<VS, ES, ST> {
+
+ private static final Logger log = Logger.getLogger(RAMGASRunner.class);
+
+ public RAMGASRunner(String[] args) throws ClassNotFoundException {
+ super(args);
+ }
+
+ protected class RAMOptionData extends GASRunnerBase<VS, ES, ST>.OptionData {
+
+ final private RAMGraph g = new RAMGraph();
+
+ public RAMGraph getGraph() {
+ synchronized(g) {
+ /*
+ * Note: Synchronization pattern is intended to provide
+ * visibility for graph traversal following a load of data into
+ * the graph.
+ */
+ return g;
+ }
+ }
+
+ @Override
+ public void init() throws Exception {
+
+ super.init();
+
+ }
+
+ @Override
+ public void shutdown() {
+
+ }
+ @Override
+ public boolean handleArg(final AtomicInteger i, final String[] args) {
+ if (super.handleArg(i, args)) {
+ return true;
+ }
+// final String arg = args[i.get()];
+// if (arg.equals("-bufferMode")) {
+// final String s = args[i.incrementAndGet()];
+// bufferModeOverride = BufferMode.valueOf(s);
+// } else if (arg.equals("-namespace")) {
+// final String s = args[i.incrementAndGet()];
+// namespaceOverride = s;
+// } else {
+// return false;
+// }
+ return false;
+ }
+
+ @Override
+ public void report(final StringBuilder sb) {
+ // NOP
+ }
+
+    } // class RAMOptionData
+
+ @Override
+ protected RAMOptionData newOptionData() {
+
+ return new RAMOptionData();
+
+ }
+
+ @Override
+ protected IGASEngine newGASEngine() {
+
+ return new RAMGASEngine(getOptionData().nthreads);
+
+ }
+
+ @Override
+ protected void loadFiles() throws Exception {
+
+ final RAMOptionData opt = getOptionData();
+
+ final String[] resources = opt.loadSet.toArray(new String[0]);
+
+ new RAMGraphLoader(opt.getGraph()).loadGraph(null/* fallback */,
+ resources);
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ protected RAMOptionData getOptionData() {
+
+ return (RAMOptionData) super.getOptionData();
+
+ }
+
+ @Override
+ protected IGraphAccessor newGraphAccessor() {
+
+ return new RAMGraphAccessor(getOptionData().g);
+
+ }
+
+ /**
+ * Performance testing harness.
+ *
+     * @see #RAMGASRunner(String[])
+ */
+ @SuppressWarnings("rawtypes")
+ public static void main(final String[] args) throws Exception {
+
+ new RAMGASRunner(args).call();
+
+ }
+
+}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphFixture.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphFixture.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphFixture.java 2013-09-12 21:00:56 UTC (rev 7402)
@@ -0,0 +1,68 @@
+/**
+ Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+package com.bigdata.rdf.graph.impl.ram;
+
+import org.openrdf.sail.Sail;
+import org.openrdf.sail.SailConnection;
+import org.openrdf.sail.SailException;
+import org.openrdf.sail.memory.MemoryStore;
+
+import com.bigdata.rdf.graph.IGASEngine;
+import com.bigdata.rdf.graph.IGraphAccessor;
+import com.bigdata.rdf.graph.impl.ram.RAMGASEngine.RAMGraph;
+import com.bigdata.rdf.graph.impl.sail.SAILGASEngine.SAILGraphAccessor;
+import com.bigdata.rdf.graph.util.AbstractGraphFixture;
+
+public class RAMGraphFixture extends AbstractGraphFixture {
+
+ private RAMGraph g;
+
+ public RAMGraphFixture() throws SailException {
+ g = new RAMGraph();
+ }
+
+ public Sail getSail() {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+     * Return the {@link RAMGraph}.
+ */
+ public RAMGraph getGraph() {
+ return g;
+ }
+
+ @Override
+ public void destroy() throws SailException {
+ g = null;
+ }
+
+ @Override
+ public IGASEngine newGASEngine(int nthreads) {
+
+ return new RAMGASEngine(nthreads);
+
+ }
+
+ @Override
+ public IGraphAccessor newGraphAccessor(SailConnection cxnIsIgnored) {
+
+// return new RAMGraphAccessor(cxnIsIgnored);
+ throw new UnsupportedOperationException();
+
+ }
+
+}
\ No newline at end of file
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphLoader.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphLoader.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphLoader.java 2013-09-12 21:00:56 UTC (rev 7402)
@@ -0,0 +1,55 @@
+/**
+ Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+package com.bigdata.rdf.graph.impl.ram;
+
+import org.openrdf.model.Resource;
+import org.openrdf.model.Statement;
+import org.openrdf.rio.RDFHandlerException;
+import org.openrdf.sail.SailException;
+
+import com.bigdata.rdf.graph.impl.ram.RAMGASEngine.RAMGraph;
+import com.bigdata.rdf.graph.util.GraphLoader;
+
+public class RAMGraphLoader extends GraphLoader {
+
+ private final RAMGraph g;
+
+ public RAMGraphLoader(final RAMGraph g) {
+ if (g == null)
+ throw new IllegalArgumentException();
+ this.g = g;
+ }
+
+ @Override
+ protected AddStatementHandler newStatementHandler() {
+ return new RAMStatementHandler();
+ }
+
+ private class RAMStatementHandler extends AddStatementHandler {
+
+ @Override
+ protected void addStatement(final Statement stmt, final Resource[] c)
+ throws RDFHandlerException {
+
+ g.add(stmt);
+
+ ntriples++;
+
+ }
+
+ }
+
+}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/package.html
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/package.html (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/package.html 2013-09-12 21:00:56 UTC (rev 7402)
@@ -0,0 +1,14 @@
+<html>
+<head>
+<title>GAS Engine for RAM</title>
+</head>
+<body>
+	<p>This is an implementation of the GAS API based on the Java
+	concurrency classes. This package is designed to have no object
+	encoding and decoding overhead and to support highly concurrent
+	operations on the graph. It does NOT implement the SAIL. Instead, it
+	provides an RDF graph abstraction using the openrdf data model that
+	supports efficient operations for the GAS API (basically, efficient
+	access to the in-edges, out-edges, and attribute values).</p>
+</body>
+</html>
\ No newline at end of file
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASRunner.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASRunner.java 2013-09-12 17:07:59 UTC (rev 7401)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASRunner.java 2013-09-12 21:00:56 UTC (rev 7402)
@@ -1,3 +1,18 @@
+/**
+ Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
package com.bigdata.rdf.graph.impl.sail;
import java.util.concurrent.atomic.AtomicInteger;
@@ -13,7 +28,7 @@
import com.bigdata.rdf.graph.IGraphAccessor;
import com.bigdata.rdf.graph.impl.sail.SAILGASEngine.SAILGraphAccessor;
import com.bigdata.rdf.graph.impl.util.GASRunnerBase;
-import com.bigdata.rdf.graph.util.GASUtil;
+import com.bigdata.rdf.graph.util.SailGraphLoader;
/**
* Class for running GAS performance tests against the SAIL.
@@ -136,7 +151,7 @@
SailConnection cxn = null;
try {
cxn = opt.cxn;
- new GASUtil().loadGraph(cxn, null/* fallback */, resources);
+ new SailGraphLoader(cxn).loadGraph(null/* fallback */, resources);
cxn.commit();
ok = true;
} finally {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java 2013-09-12 17:07:59 UTC (rev 7401)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java 2013-09-12 21:00:56 UTC (rev 7402)
@@ -28,7 +28,7 @@
SailConnection cxn = null;
try {
cxn = getSail().getConnection();
- new GASUtil().loadGraph(cxn, null/* fallback */, resources);
+ new SailGraphLoader(cxn).loadGraph(null/* fallback */, resources);
cxn.commit();
ok = true;
} finally {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GASUtil.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GASUtil.java 2013-09-12 17:07:59 UTC (rev 7401)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GASUtil.java 2013-09-12 21:00:56 UTC (rev 7402)
@@ -15,34 +15,11 @@
*/
package com.bigdata.rdf.graph.util;
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.net.URISyntaxException;
import java.util.Iterator;
import java.util.concurrent.TimeUnit;
-import java.util.zip.GZIPInputStream;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipInputStream;
-import org.apache.log4j.Logger;
-import org.openrdf.model.Resource;
import org.openrdf.model.Statement;
import org.openrdf.model.Value;
-import org.openrdf.rio.RDFFormat;
-import org.openrdf.rio.RDFHandlerException;
-import org.openrdf.rio.RDFParseException;
-import org.openrdf.rio.RDFParser;
-import org.openrdf.rio.RDFParserFactory;
-import org.openrdf.rio.RDFParserRegistry;
-import org.openrdf.rio.helpers.RDFHandlerBase;
-import org.openrdf.sail.SailConnection;
-import org.openrdf.sail.SailException;
import cutthecrap.utils.striterators.EmptyIterator;
@@ -53,7 +30,7 @@
*/
public class GASUtil {
- private static final Logger log = Logger.getLogger(GASUtil.class);
+// private static final Logger log = Logger.getLogger(GASUtil.class);
/**
* The average fan out of the frontier.
@@ -113,306 +90,4 @@
@SuppressWarnings({ "unchecked" })
public static final Iterator<Statement> EMPTY_EDGES_ITERATOR = EmptyIterator.DEFAULT;
- /**
- * Return the best guess at the {@link RDFFormat} for a resource.
- * <p>
- * Note: This handles the .gz and .zip extensions.
- *
- * @param n
- * The name of the resource.
- * @param rdfFormat
- * The fallback format (optional).
- *
- * @return The best guess format.
- */
- private RDFFormat guessRDFFormat(final String n, final RDFFormat rdfFormat) {
-
- RDFFormat fmt = RDFFormat.forFileName(n);
-
- if (fmt == null && n.endsWith(".zip")) {
- fmt = RDFFormat.forFileName(n.substring(0, n.length() - 4));
- }
-
- if (fmt == null && n.endsWith(".gz")) {
- fmt = RDFFormat.forFileName(n.substring(0, n.length() - 3));
- }
-
- if (fmt == null) // fallback
- fmt = rdfFormat;
-
- return fmt;
-
- }
-
- public void loadGraph(final Sail...
[truncated message content] |
|
From: <tho...@us...> - 2013-09-13 14:42:17
|
Revision: 7412
http://bigdata.svn.sourceforge.net/bigdata/?rev=7412&view=rev
Author: thompsonbry
Date: 2013-09-13 14:42:08 +0000 (Fri, 13 Sep 2013)
Log Message:
-----------
Changes to the sampling logic. The BD and SAIL implementations no longer include self-loops in the set from which the vertex sample is drawn.
The sampling logic has been modified to look at whether the algorithm wants to visit in-edges, out-edges, or all edges for GATHER. If there is no GATHER (no edges), then it looks at the SCATTER phase. The samples are then drawn only from a distribution over the kinds of edges that will be traversed in the first phase of the algorithm (either scatter or gather).
The sampling logic was also modified to drop vertices that have zero degree with respect to the type(s) of edges being sampled.
The GASRunner has been modified to drop vertices that are not connected to at least one other vertex by the execution of the GASProgram (i.e., the cumulative frontier size is one) and to report the number of such unconnected vertices that were found in the sample.
See #629 (graph mining)
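The weighted sample described above is effectively inverse-CDF sampling without replacement over per-vertex edge counts. A self-contained sketch of that idea (the names here are hypothetical; the real code is VertexDistribution.getVector() and getWeightedSample(), shown in the diff below), assuming a non-empty weight vector with a positive sum:

    import java.util.LinkedHashSet;
    import java.util.Random;
    import java.util.Set;

    class WeightedSampleSketch {

        static Integer[] sample(final double[] weights, final int desired,
                final Random r) {

            // Cumulative, normalized vector (cf. getVector(edges, true)).
            final double[] cdf = new double[weights.length];
            double sum = 0d;
            for (int i = 0; i < weights.length; i++)
                cdf[i] = (sum += weights[i]);
            for (int i = 0; i < cdf.length; i++)
                cdf[i] /= sum;

            // Draw until enough distinct indices are found or we give up
            // (cf. the desiredSampleSize * 3 limit in getWeightedSample()).
            final Set<Integer> selected = new LinkedHashSet<Integer>();
            final int limit = desired * 3;
            int round = 0;
            while (selected.size() < desired && round++ < limit) {
                final double d = r.nextDouble();
                int i = 0;
                while (i < cdf.length - 1 && cdf[i] < d)
                    i++; // first index whose cumulative weight covers d
                selected.add(i);
            }

            return selected.toArray(new Integer[selected.size()]);
        }

    }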
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/GASRunnerBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/VertexDistribution.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASEngine.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java 2013-09-13 12:41:49 UTC (rev 7411)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java 2013-09-13 14:42:08 UTC (rev 7412)
@@ -230,11 +230,11 @@
boolean modified = false;
if (o instanceof URI) {
// Edge
- modified|=get(s, true/* create */).addOutEdge(st);
- modified|=get(o, true/* create */).addInEdge(st);
+ modified |= get(s, true/* create */).addOutEdge(st);
+ modified |= get(o, true/* create */).addInEdge(st);
} else {
// Property value.
- modified|=get(s, true/* create */).addAttrib(st);
+ modified |= get(s, true/* create */).addAttrib(st);
}
return modified;
@@ -341,12 +341,27 @@
final VertexDistribution sample = new VertexDistribution(r);
- for (Value v : g.vertices.keySet()) {
+ for (RAMGraph.Vertex vertex : g.vertices.values()) {
+ final Value v = vertex.v;
+
if (v instanceof Resource) {
- sample.addSample((Resource) v);
+ /*
+ * FIXME This is not ignoring self-loops. Realistically, we
+ * want to include them in the data since they are part of
+ * the data, but we do not want to consider them in samples
+ * since they do not actually go anywhere. The SAIL and BD
+ * implementations of this method filter out self-loops, but
+ * this implementation does not.
+ */
+
+ if (vertex.getInEdgeCount() > 0)
+ sample.addInEdgeSample((Resource) v);
+ if (vertex.getOutEdgeCount() > 0)
+ sample.addOutEdgeSample((Resource) v);
+
}
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASEngine.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASEngine.java 2013-09-13 12:41:49 UTC (rev 7411)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASEngine.java 2013-09-13 14:42:08 UTC (rev 7412)
@@ -246,10 +246,17 @@
continue;
}
- sample.addSample(st.getSubject());
+ if (st.getSubject().equals(st.getObject())) {
- sample.addSample((Resource) st.getObject());
+ // ignore self-loops.
+ continue;
+ }
+
+ sample.addOutEdgeSample(st.getSubject());
+
+ sample.addInEdgeSample((Resource) st.getObject());
+
}
} finally {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/GASRunnerBase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/GASRunnerBase.java 2013-09-13 12:41:49 UTC (rev 7411)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/GASRunnerBase.java 2013-09-13 14:42:08 UTC (rev 7412)
@@ -9,6 +9,7 @@
import org.apache.log4j.Logger;
import org.openrdf.model.Value;
+import com.bigdata.rdf.graph.EdgesEnum;
import com.bigdata.rdf.graph.IGASContext;
import com.bigdata.rdf.graph.IGASEngine;
import com.bigdata.rdf.graph.IGASProgram;
@@ -415,10 +416,24 @@
final VertexDistribution dist = graphAccessor.getDistribution(opt.r);
- final Value[] sampled = dist.getWeightedSample(opt.nsamples);
+ // Assume that a GATHER will be done for each starting vertex.
+ EdgesEnum edges = gasProgram.getGatherEdges();
+ if (edges == EdgesEnum.NoEdges) {
+
+ // If no GATHER is performed, then use the SCATTER edges.
+ edges = gasProgram.getScatterEdges();
+
+ }
+
+ final Value[] sampled = dist.getWeightedSample(opt.nsamples,
+ edges);
+
final IGASStats total = new GASStats();
+ // #of vertices that were not connected for that analytic.
+ long nunconnected = 0;
+
for (int i = 0; i < sampled.length; i++) {
final Value startingVertex = sampled[i];
@@ -427,6 +442,19 @@
final IGASStats stats = (IGASStats) gasContext.call();
+ if (stats.getFrontierSize() == 1) {
+ /*
+ * The starting vertex was not actually connected to any
+ * other vertices by the traversal performed by the GAS
+ * program.
+ */
+ if (log.isInfoEnabled())
+ log.info("Ignoring unconnected startingVertex: "
+ + startingVertex + ", stats=" + stats);
+ nunconnected++;
+ continue;
+ }
+
total.add(stats);
if (log.isInfoEnabled()) {
@@ -445,6 +473,7 @@
sb.append(", nsamples=" + opt.nsamples); // #desired samples
sb.append(", nsampled=" + sampled.length);// #actually sampled
sb.append(", distSize=" + dist.size());// #available for sampling.
+ sb.append(", nunconnected=" + nunconnected);// #unconnected vertices.
sb.append(", nthreads=" + opt.nthreads);
sb.append(", scheduler=" + ((GASState<VS, ES, ST>)gasState).getScheduler().getClass().getSimpleName());
sb.append(", gasEngine=" + gasEngine.getClass().getSimpleName());
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/VertexDistribution.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/VertexDistribution.java 2013-09-13 12:41:49 UTC (rev 7411)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/VertexDistribution.java 2013-09-13 14:42:08 UTC (rev 7412)
@@ -23,6 +23,8 @@
import org.openrdf.model.Resource;
+import com.bigdata.rdf.graph.EdgesEnum;
+
/**
* Utility class for sampling vertices from a graph.
* <p>
@@ -47,16 +49,28 @@
* A sample.
*/
private static class VertexSample {
- /** The frequence of the {@link Resource}. */
- public double f;
/** The {@link Resource}. */
public final Resource v;
+        /** The #of times the {@link Resource} occurs as the target of an in-edge. */
+        public int in;
+        /** The #of times the {@link Resource} occurs as the source of an out-edge. */
+        public int out;
- // /** The #of times the {@link Resource} has been selected. */
- // public int n;
- public VertexSample(final double f, final Resource v) {
- this.f = f;
+ /**
+ *
+ * @param v
+ * The resource.
+ * @param in
+ * The #of times this {@link Resource} has been observed as
+ * the target of an in-edge.
+ * @param out
+ * The #of times this {@link Resource} has been observed as
+ * the source of an out-edge.
+ */
+ public VertexSample(final Resource v, final int in, final int out) {
this.v = v;
+ this.in = in;
+ this.out = out;
// this.n = 0;
}
}
@@ -87,19 +101,19 @@
}
/**
- * Add a sample.
+ * Add a sample of a vertex having some out-edge.
*
* @param v
* The vertex.
*/
- public void addSample(final Resource v) {
+ public void addOutEdgeSample(final Resource v) {
VertexSample s = samples.get(v);
if (s == null) {
// new sample.
- samples.put(v, s = new VertexSample(1d/* f */, v));
+ samples.put(v, s = new VertexSample(v, 0/* in */, 1/* out */));
// indexOf that sample.
indexOf.put(samples.size() - 1, s);
@@ -109,6 +123,28 @@
}
/**
+ * Add a sample of a vertex having some in-edge.
+ *
+ * @param v
+ * The vertex.
+ */
+ public void addInEdgeSample(final Resource v) {
+
+ VertexSample s = samples.get(v);
+
+ if (s == null) {
+
+ // new sample.
+ samples.put(v, s = new VertexSample(v, 1/* in */, 0/* out */));
+
+ // indexOf that sample.
+ indexOf.put(samples.size() - 1, s);
+
+ }
+
+ }
+
+ /**
      * Return the #of samples in the distribution from which a caller-specified
      * number of samples may then be drawn using a random sampling without
      * replacement technique.
@@ -123,19 +159,35 @@
}
/**
- * Build a normalized vector over the sample frequences. The indices of the
- * sample vector are correlated with the {@link #indexOf} map. The values in
- * the normalized vector are in <code>[0:1]</code> and sum to
- * <code>1.0</code>.
+ * Build a vector over the samples. The indices of the sample vector are
+ * correlated with the {@link #indexOf} map.
+ *
+ * @param edges
+     *            Only vertices having the specified type(s) of edges will be
+ * included in the distribution.
+ * @param normalize
+ * When <code>true</code> the vector will be normalized such that
+ * the elements in the vector are in <code>[0:1]</code> and sum
+ * to <code>1.0</code>. When <code>false</code> the elements of
+ * the vector are the unnormalized sum of the #of edges of the
+ * specified type(s).
+ *
+ * @return The distribution vector over the samples.
*/
- double[] getNormVector() {
+ double[] getVector(final EdgesEnum edges, final boolean normalize) {
- final double[] norm = new double[samples.size()];
+ if (edges == null)
+ throw new IllegalArgumentException();
- if (norm.length == 0) {
+ if (edges == EdgesEnum.NoEdges)
+ throw new IllegalArgumentException();
+ final double[] a = new double[samples.size()];
+
+ if (a.length == 0) {
+
// No samples. Avoid division by zero.
- return norm;
+ return a;
}
@@ -145,18 +197,64 @@
for (VertexSample s : samples.values()) {
- norm[i++] = sum += s.f;
+ final double d;
+ switch (edges) {
+ case InEdges:
+ d = s.in;
+ break;
+ case OutEdges:
+ d = s.out;
+ break;
+ case AllEdges:
+ d = (s.in + s.out);
+ break;
+ default:
+ throw new AssertionError();
+ }
+
+ if (d == 0)
+ continue;
+ if (normalize) {
+ a[i] = sum += d;
+ } else {
+ a[i] = d;
+ }
+
+ i++;
+
}
+ final int nfound = i;
- for (i = 0; i < norm.length; i++) {
+ if (nfound == 0) {
+ // Empty sample.
+ return new double[0];
+ }
+
+ if (normalize) {
- norm[i] /= sum;
+ for (i = 0; i < a.length; i++) {
+ a[i] /= sum;
+
+ }
+
}
- return norm;
+ if (nfound < a.length) {
+ // Make the array dense.
+
+ final double[] b = new double[nfound];
+
+ System.arraycopy(a/* src */, 0/* srcPos */, b/* dest */,
+ 0/* destPos */, nfound/* length */);
+
+ return b;
+ }
+
+ return a;
+
}
/**
@@ -165,10 +263,15 @@
*
* @param desiredSampleSize
* The desired sample size.
+ * @param edges
+ * The sample is taken from vertices having the specified type(s)
+ * of edges. Vertices with zero degree for the specified type(s)
+     *            of edges will not be present in the returned sample.
*
* @return The distinct samples that were found.
*/
- public Resource[] getWeightedSample(final int desiredSampleSize) {
+ public Resource[] getWeightedSample(final int desiredSampleSize,
+ final EdgesEnum edges) {
if (samples.isEmpty()) {
@@ -178,7 +281,7 @@
}
// Build a normalized vector over the sample.
- final double[] norm = getNormVector();
+ final double[] norm = getVector(edges, true/* normalized */);
// Maximum number of samples to attempt.
final int limit = (int) Math.min(desiredSampleSize * 3L,
@@ -218,10 +321,16 @@
* at random without regard to their frequency distribution.
*
* @param desiredSampleSize
+ * The desired sample size.
+ * @param edges
+ * The sample is taken from vertices having the specified type(s)
+ * of edges. Vertices with zero degree for the specified type(s)
+     *            of edges will not be present in the returned sample.
*
- * @return
+ * @return The distinct samples that were found.
*/
- public Resource[] getUnweightedSample(final int desiredSampleSize) {
+ public Resource[] getUnweightedSample(final int desiredSampleSize,
+ final EdgesEnum edges) {
if (samples.isEmpty()) {
@@ -230,6 +339,9 @@
}
+ // Build a vector over the sample.
+ final double[] vec = getVector(edges, true/* normalized */);
+
// Maximum number of samples to attempt.
final int limit = (int) Math.min(desiredSampleSize * 3L,
Integer.MAX_VALUE);
@@ -239,11 +351,9 @@
// The selected samples.
final Set<Resource> selected = new HashSet<Resource>();
- final int nsamples = this.samples.size();
-
while (selected.size() < desiredSampleSize && round++ < limit) {
- final int i = r.nextInt(nsamples);
+ final int i = r.nextInt(vec.length);
final Resource v = indexOf.get(Integer.valueOf(i)).v;
@@ -252,6 +362,23 @@
}
return selected.toArray(new Resource[selected.size()]);
+
+// // The selected samples.
+// final Set<Resource> selected = new HashSet<Resource>();
+//
+// final int nsamples = this.samples.size();
+//
+// while (selected.size() < desiredSampleSize && round++ < limit) {
+//
+// final int i = r.nextInt(nsamples);
+//
+// final Resource v = indexOf.get(Integer.valueOf(i)).v;
+//
+// selected.add(v);
+//
+// }
+//
+// return selected.toArray(new Resource[selected.size()]);
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASEngine.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASEngine.java 2013-09-13 12:41:49 UTC (rev 7411)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASEngine.java 2013-09-13 14:42:08 UTC (rev 7412)
@@ -494,10 +494,17 @@
continue;
}
- sample.addSample((Resource) spo.s());
+ if (spo.s().equals(spo.o())) {
- sample.addSample((Resource) spo.o());
+ // ignore self-loops.
+ continue;
+ }
+
+ sample.addOutEdgeSample((Resource) spo.s());
+
+ sample.addInEdgeSample((Resource) spo.o());
+
}
return sample;
|
|
From: <tho...@us...> - 2013-09-17 14:16:33
|
Revision: 7414
http://bigdata.svn.sourceforge.net/bigdata/?rev=7414&view=rev
Author: thompsonbry
Date: 2013-09-17 14:16:25 +0000 (Tue, 17 Sep 2013)
Log Message:
-----------
Bug fix for #743 (AbstractTripleStore.destroy() does not filter for correct prefix).
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IIndexStore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/AbstractResource.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/ListIndicesTask.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IIndexStore.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IIndexStore.java 2013-09-13 19:35:09 UTC (rev 7413)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IIndexStore.java 2013-09-17 14:16:25 UTC (rev 7414)
@@ -62,7 +62,13 @@
* Iterator visits the names of all indices spanned by the given prefix.
*
* @param prefix
- * The prefix (optional).
+ * The prefix (optional). When given, this MUST include a
+ * <code>.</code> if you want to restrict the scan to only those
+ * indices in a given namespace. Otherwise, when both kb and kb2
+ * are namespaces, a scan with the prefix <code>kb</code> will
+ * also visit the indices of <code>kb2</code>, since the names
+ * spanned by <code>kb</code> include both <code>kb.xyz</code>
+ * and <code>kb2.xyz</code>.
* @param timestamp
* A timestamp which represents either a possible commit time on
* the store or a read-only transaction identifier.
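
To see the failure mode concretely, consider the following small sketch. The scan() method here is a plain string-prefix match standing in for the real Name2Addr key scan, so treat the whole class as an illustrative assumption rather than the actual implementation.

import java.util.ArrayList;
import java.util.List;

public class PrefixScanSketch {

    // Stand-in for the index-name scan: simple string prefix matching.
    static List<String> scan(final String prefix, final String[] names) {
        final List<String> hits = new ArrayList<String>();
        for (String name : names) {
            if (name.startsWith(prefix))
                hits.add(name);
        }
        return hits;
    }

    public static void main(final String[] args) {
        final String[] names = { "kb.lex.TERM2ID", "kb.spo.SPO",
                "kb2.lex.TERM2ID", "kb2.spo.SPO" };

        // The bug in #743: scanning "kb" spans BOTH namespaces.
        System.out.println(scan("kb", names)); // all four names

        // The fix: scanning "kb." is restricted to the kb namespace.
        System.out.println(scan("kb.", names)); // only the kb.* names
    }
}

This is why the AbstractResource.destroy() change below appends "." to the namespace before scanning.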
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2013-09-13 19:35:09 UTC (rev 7413)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2013-09-17 14:16:25 UTC (rev 7414)
@@ -1600,6 +1600,14 @@
* {@link Name2Addr#getKey(String)} and
* {@link Name2AddrTupleSerializer#serializeKey(Object)}
* implementation (depending on how the keys are being encoded).
+ * <p>
+ * Update: See <a
+ * href="https://sourceforge.net/apps/trac/bigdata/ticket/743">
+ * AbstractTripleStore.destroy() does not filter for correct prefix
+ * </a> as well. Maybe the problem is just that we need to have the
+ * "." appended to the namespace. This could be something that is
+ * done automatically if the caller does not take care of it
+ * themselves.
*/
public static final Iterator<String> indexNameScan(final String prefix,
final IIndex n2a) {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/AbstractResource.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/AbstractResource.java 2013-09-13 19:35:09 UTC (rev 7413)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/AbstractResource.java 2013-09-17 14:16:25 UTC (rev 7414)
@@ -694,8 +694,8 @@
*/
{
- final Iterator<String> itr = indexManager.indexNameScan(namespace,
- timestamp);
+ final Iterator<String> itr = indexManager.indexNameScan(namespace
+ + ".", timestamp);
while (itr.hasNext()) {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/ListIndicesTask.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/ListIndicesTask.java 2013-09-13 19:35:09 UTC (rev 7413)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/ListIndicesTask.java 2013-09-17 14:16:25 UTC (rev 7414)
@@ -54,6 +54,13 @@
* @param namespace
* The namespace prefix for the indices to be returned (may be an
* empty string to return the names of all registered indices).
+ * <p>
+ * Note: This SHOULD include a "." if you want to restrict the
+ * scan to only those indices in a given namespace. Otherwise,
+ * when both kb and kb2 are namespaces, a scan with the prefix
+ * <code>kb</code> will also visit the indices of
+ * <code>kb2</code>, since the names spanned by <code>kb</code>
+ * include both <code>kb.xyz</code> and <code>kb2.xyz</code>.
*/
public ListIndicesTask(final long ts, final String namespace) {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java 2013-09-13 19:35:09 UTC (rev 7413)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java 2013-09-17 14:16:25 UTC (rev 7414)
@@ -37,6 +37,7 @@
import com.bigdata.journal.ITx;
import com.bigdata.rdf.lexicon.LexiconKeyOrder;
import com.bigdata.rdf.spo.SPOKeyOrder;
+import com.bigdata.relation.AbstractResource;
import com.bigdata.relation.RelationSchema;
import com.bigdata.relation.locator.DefaultResourceLocator;
import com.bigdata.sparse.ITPS;
@@ -282,4 +283,227 @@
}
+ /**
+ * Verify the namespace prefix for the triple store is imposed correctly in
+ * {@link AbstractResource#destroy()}. Create two KBs such that the
+ * namespace for one instance is a prefix of the namespace for the other
+ * instance, e.g.,
+ *
+ * <pre>
+ * kb
+ * kb1
+ * </pre>
+ *
+ * Verify that destroying <code>kb</code> does not cause the indices for
+ * <code>kb1</code> to be destroyed.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/743">
+ * AbstractTripleStore.destroy() does not filter for correct prefix
+ * </a>
+ */
+ public void test_destroyTwo() {
+
+ final String namespace = "kb";
+ final String namespaceLexiconRelation = namespace + ".lex";
+ final String namespaceSPORelation = namespace + ".spo";
+ final String lexiconRelationIndexName = namespaceLexiconRelation + "."
+ + LexiconKeyOrder.TERM2ID.getIndexName();
+ final String primaryStatementIndexName = namespaceSPORelation + "."
+ + SPOKeyOrder.SPO.getIndexName();
+
+ final String namespace1 = "kb1";
+ final String namespaceLexiconRelation1 = namespace1 + ".lex";
+ final String namespaceSPORelation1 = namespace1 + ".spo";
+ final String lexiconRelationIndexName1 = namespaceLexiconRelation1
+ + "." + LexiconKeyOrder.TERM2ID.getIndexName();
+ final String primaryStatementIndexName1 = namespaceSPORelation1 + "."
+ + SPOKeyOrder.SPO.getIndexName();
+
+ final Properties properties = new Properties();
+ properties.setProperty(com.bigdata.journal.Options.CREATE_TEMP_FILE, "true");
+ properties.setProperty(AbstractTripleStore.Options.BUFFER_MODE,BufferMode.DiskWORM.toString());
+ properties.setProperty(AbstractTripleStore.Options.TRIPLES_MODE,"true");
+
+ final AbstractTripleStore kb = getStore(properties);
+
+ final IIndexManager indexManager = kb.getIndexManager();
+
+ try {
+
+ assertEquals(namespace, kb.getNamespace());
+
+ final AbstractTripleStore kb1 = new LocalTripleStore(indexManager,
+ namespace1, ITx.UNISOLATED, properties);
+ kb1.create();
+
+ // make the tripleStore dirty so commit() will do something.
+ kb.addTerm(kb.getValueFactory().createLiteral("bigdata"));
+ kb1.addTerm(kb.getValueFactory().createLiteral("bigdata"));
+
+ // Verify post-conditions of the created KBs.
+ {
+
+ /*
+ * Verify that both triple store declarations exist in the GRS.
+ *
+ * Note: Will be in lexical order for Unicode.
+ */
+ final String[] namespaces = getNamespaces(indexManager)
+ .toArray(new String[] {});
+ assertEquals(new String[] { namespace, namespace1 }, namespaces);
+
+ /*
+ * Verify that the unisolated version of each triple store is
+ * the same reference that we obtained above when that triple
+ * store was created.
+ */
+ assertTrue(kb == indexManager.getResourceLocator().locate(
+ kb.getNamespace(), ITx.UNISOLATED));
+ assertTrue(kb1 == indexManager.getResourceLocator().locate(
+ kb1.getNamespace(), ITx.UNISOLATED));
+
+ /* Verify lexicon relations exist. */
+ assertTrue(kb.getLexiconRelation() == indexManager
+ .getResourceLocator().locate(namespaceLexiconRelation,
+ ITx.UNISOLATED));
+ assertTrue(kb1.getLexiconRelation() == indexManager
+ .getResourceLocator().locate(namespaceLexiconRelation1,
+ ITx.UNISOLATED));
+
+ /* Verify SPO relations exist. */
+ assertTrue(kb.getSPORelation() == indexManager
+ .getResourceLocator().locate(namespaceSPORelation,
+ ITx.UNISOLATED));
+ assertTrue(kb1.getSPORelation() == indexManager
+ .getResourceLocator().locate(namespaceSPORelation1,
+ ITx.UNISOLATED));
+
+ /* Verify lexicon index exists. */
+ assertNotNull(indexManager.getIndex(lexiconRelationIndexName,
+ ITx.UNISOLATED));
+ assertNotNull(indexManager.getIndex(lexiconRelationIndexName1,
+ ITx.UNISOLATED));
+
+ /* Verify primary SPO index exists. */
+ assertNotNull(indexManager.getIndex(primaryStatementIndexName,
+ ITx.UNISOLATED));
+ assertNotNull(indexManager.getIndex(primaryStatementIndexName1,
+ ITx.UNISOLATED));
+
+ }
+
+ /* Commit. */
+ final long commitTime = kb.commit();
+ assertTrue(commitTime > 0);
+
+ /*
+ * Destroy the triple store whose namespace is a prefix of the 2nd
+ * triple store namespace.
+ */
+ {
+ kb.destroy();
+
+ // global row store entry is gone.
+ final String[] namespaces = getNamespaces(indexManager).toArray(
+ new String[] {});
+ assertEquals(new String[] { namespace1 }, namespaces);
+
+ // resources cannot be located.
+ assertTrue(null == indexManager.getResourceLocator().locate(
+ namespace, ITx.UNISOLATED));
+ assertTrue(null == indexManager.getResourceLocator().locate(
+ namespaceLexiconRelation, ITx.UNISOLATED));
+ assertTrue(null == indexManager.getResourceLocator().locate(
+ namespaceSPORelation, ITx.UNISOLATED));
+
+ // indices are gone.
+ assertNull(indexManager.getIndex(lexiconRelationIndexName,
+ ITx.UNISOLATED));
+ assertNull(indexManager.getIndex(primaryStatementIndexName,
+ ITx.UNISOLATED));
+
+ // The committed version of the triple store remains visible.
+ assertNotNull(indexManager.getResourceLocator().locate(
+ namespace, commitTime - 1));
+ }
+
+ /*
+ * Verify that the other kb still exists, including its GRS
+ * declaration and its indices.
+ */
+ {
+
+ /*
+ * Verify that the triple store declaration exists in the GRS.
+ *
+ * Note: Will be in lexical order for Unicode.
+ */
+ final String[] namespaces = getNamespaces(indexManager).toArray(
+ new String[] {});
+ assertEquals(new String[] { namespace1 }, namespaces);
+
+ /*
+ * Verify that the unisolated version of each triple store is the
+ * same reference that we obtained above when that triple store was
+ * created.
+ */
+ assertTrue(kb1 == indexManager.getResourceLocator().locate(
+ kb1.getNamespace(), ITx.UNISOLATED));
+
+ /* Verify lexicon relations exist. */
+ assertTrue(kb1.getLexiconRelation() == indexManager
+ .getResourceLocator().locate(namespaceLexiconRelation1,
+ ITx.UNISOLATED));
+
+ /* Verify SPO relations exist. */
+ assertTrue(kb1.getSPORelation() == indexManager
+ .getResourceLocator().locate(namespaceSPORelation1,
+ ITx.UNISOLATED));
+
+ /* Verify lexicon index exists. */
+ assertNotNull(indexManager.getIndex(lexiconRelationIndexName1,
+ ITx.UNISOLATED));
+
+ /* Verify primary SPO index exists. */
+ assertNotNull(indexManager.getIndex(primaryStatementIndexName1,
+ ITx.UNISOLATED));
+
+ }
+
+ /*
+ * Destroy the other triple store.
+ */
+ {
+ kb1.destroy();
+
+ // global row store entry is gone.
+ assertTrue(getNamespaces(indexManager).isEmpty());
+
+ // resources cannot be located.
+ assertTrue(null == indexManager.getResourceLocator().locate(
+ namespace1, ITx.UNISOLATED));
+ assertTrue(null == indexManager.getResourceLocator().locate(
+ namespaceLexiconRelation1, ITx.UNISOLATED));
+ assertTrue(null == indexManager.getResourceLocator().locate(
+ namespaceSPORelation1, ITx.UNISOLATED));
+
+ // indices are gone.
+ assertNull(indexManager.getIndex(lexiconRelationIndexName1,
+ ITx.UNISOLATED));
+ assertNull(indexManager.getIndex(primaryStatementIndexName1,
+ ITx.UNISOLATED));
+
+ // The committed version of the triple store remains visible.
+ assertNotNull(indexManager.getResourceLocator().locate(
+ namespace1, commitTime - 1));
+ }
+
+ } finally {
+
+ indexManager.destroy();
+
+ }
+
+ }
+
}
|
|
From: <jer...@us...> - 2013-09-26 14:52:57
|
Revision: 7418
http://bigdata.svn.sourceforge.net/bigdata/?rev=7418&view=rev
Author: jeremy_carroll
Date: 2013-09-26 14:52:49 +0000 (Thu, 26 Sep 2013)
Log Message:
-----------
Test and fix for optimizing optionals with ALPP. trac746
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTNamedSubqueryOptimizer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/AbstractBigdataExprBuilderTestCase.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/ticket746.txt
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java 2013-09-21 12:39:31 UTC (rev 7417)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java 2013-09-26 14:52:49 UTC (rev 7418)
@@ -36,6 +36,7 @@
import com.bigdata.bop.IBindingSet;
import com.bigdata.bop.IVariable;
import com.bigdata.rdf.sparql.ast.ASTBase;
+import com.bigdata.rdf.sparql.ast.ArbitraryLengthPathNode;
import com.bigdata.rdf.sparql.ast.AssignmentNode;
import com.bigdata.rdf.sparql.ast.FilterNode;
import com.bigdata.rdf.sparql.ast.GraphPatternGroup;
@@ -346,7 +347,8 @@
|| t instanceof NamedSubqueryInclude
|| t instanceof SubqueryRoot
|| t instanceof ServiceNode
- || t instanceof UnionNode) {
+ || t instanceof UnionNode
+ || t instanceof ArbitraryLengthPathNode ) {
// Moved to the named subquery.
// move.add(t);
group.removeChild(t);
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTNamedSubqueryOptimizer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTNamedSubqueryOptimizer.java 2013-09-21 12:39:31 UTC (rev 7417)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTNamedSubqueryOptimizer.java 2013-09-26 14:52:49 UTC (rev 7418)
@@ -27,17 +27,28 @@
package com.bigdata.rdf.sparql.ast.optimizers;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import org.apache.commons.io.IOUtils;
import org.openrdf.model.impl.URIImpl;
import org.openrdf.model.vocabulary.RDF;
import org.openrdf.model.vocabulary.RDFS;
+import org.openrdf.query.MalformedQueryException;
import org.openrdf.query.algebra.StatementPattern.Scope;
+import com.bigdata.bop.BOpUtility;
import com.bigdata.bop.IBindingSet;
import com.bigdata.bop.IVariable;
import com.bigdata.bop.Var;
import com.bigdata.rdf.internal.IV;
+import com.bigdata.rdf.sail.sparql.Bigdata2ASTSPARQLParser;
+import com.bigdata.rdf.sail.sparql.TestSubqueryPatterns;
+import com.bigdata.rdf.sail.sparql.ast.ParseException;
+import com.bigdata.rdf.sail.sparql.ast.TokenMgrError;
import com.bigdata.rdf.sparql.ast.ASTContainer;
import com.bigdata.rdf.sparql.ast.AbstractASTEvaluationTestCase;
import com.bigdata.rdf.sparql.ast.ConstantNode;
@@ -52,6 +63,7 @@
import com.bigdata.rdf.sparql.ast.StatementPatternNode;
import com.bigdata.rdf.sparql.ast.StaticAnalysis;
import com.bigdata.rdf.sparql.ast.SubqueryRoot;
+import com.bigdata.rdf.sparql.ast.ValueExpressionNode;
import com.bigdata.rdf.sparql.ast.VarNode;
import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext;
import com.bigdata.rdf.vocab.decls.FOAFVocabularyDecl;
@@ -830,4 +842,33 @@
}
+
+ /**
+ * Unit test for WITH {subquery} AS "name" and INCLUDE. The WITH must be in
+ * the top-level query.
+ *
+ * This is specifically for Trac 746, which crashed during the optimize
+ * step. The test simply runs that far and does not verify anything
+ * other than the ability to optimize without an exception.
+ * @throws IOException
+ */
+ public void test_namedSubquery746() throws MalformedQueryException,
+ TokenMgrError, ParseException, IOException {
+
+ final String sparql = IOUtils.toString(getClass().getResourceAsStream("ticket746.txt"));
+
+
+ final QueryRoot ast = new Bigdata2ASTSPARQLParser(store).parseQuery2(sparql,
+ baseURI).getOriginalAST();
+
+
+ final IASTOptimizer rewriter = new DefaultOptimizerList();
+
+ final AST2BOpContext context = new AST2BOpContext(new ASTContainer(
+ ast), store);
+ rewriter.optimize(context, ast/* queryNode */, null);
+
+
+ }
+
}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/ticket746.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/ticket746.txt (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/ticket746.txt 2013-09-26 14:52:49 UTC (rev 7418)
@@ -0,0 +1,35 @@
+base <http://example.org/>
+prefix sys: </bdm/api/kbobject/sys:>
+prefix base: </bdm/api/kbobject/base:>
+prefix syapse: </graph/syapse#>
+prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
+prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+
+SELECT *
+
+WITH {
+SELECT *
+
+WHERE {
+
+ OPTIONAL {
+ base:disease syapse:hasLiteralProperty $j2 .
+ ?Sample_A $j2 ?j1
+ }
+ OPTIONAL {
+ base:species syapse:hasLiteralProperty $j4 .
+ ?Sample_A $j4 ?j3
+ }
+ OPTIONAL {
+ ?Sample_A sys:name ?j5
+ }
+ ?Sample_A rdf:type / rdfs:subClassOf * base:MammalianCellLineSample
+}
+
+} AS %__UserQuery
+
+WHERE {
+
+INCLUDE %__UserQuery
+
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/AbstractBigdataExprBuilderTestCase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/AbstractBigdataExprBuilderTestCase.java 2013-09-21 12:39:31 UTC (rev 7417)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/AbstractBigdataExprBuilderTestCase.java 2013-09-26 14:52:49 UTC (rev 7418)
@@ -237,7 +237,7 @@
/**
* Applies the {@link Bigdata2ASTSPARQLParser}.
*/
- protected QueryRoot parse(final String queryStr, final String baseURI)
+ public QueryRoot parse(final String queryStr, final String baseURI)
throws MalformedQueryException {
final QueryRoot ast = new Bigdata2ASTSPARQLParser(tripleStore).parseQuery2(queryStr,
|
|
From: <mrp...@us...> - 2013-09-28 19:19:07
|
Revision: 7422
http://bigdata.svn.sourceforge.net/bigdata/?rev=7422&view=rev
Author: mrpersonick
Date: 2013-09-28 19:18:59 +0000 (Sat, 28 Sep 2013)
Log Message:
-----------
remote query cancellation
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 2013-09-28 18:35:26 UTC (rev 7421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 2013-09-28 19:18:59 UTC (rev 7422)
@@ -27,6 +27,8 @@
package com.bigdata.rdf.sparql.ast.service;
+import java.util.UUID;
+
import org.apache.http.HttpResponse;
import org.apache.http.impl.client.DefaultHttpClient;
import org.openrdf.query.BindingSet;
@@ -134,7 +136,11 @@
// opts.queryStr = queryStr;
+ final UUID queryId = UUID.randomUUID();
+
o.addRequestParam("query", queryStr);
+
+ o.addRequestParam("queryId", queryId.toString());
final RemoteRepository repo = new RemoteRepository(uriStr,
new DefaultHttpClient(params.getClientConnectionManager()),
@@ -152,14 +158,16 @@
try {
- final HttpResponse resp = repo.doConnect(o);
+// final HttpResponse resp = repo.doConnect(o);
+//
+// RemoteRepository.checkResponseCode(resp);
+//
+// queryResult = repo.tupleResults(resp);
+//
+//// queryResult = parseResults(checkResponseCode(doSparqlQuery(opts)));
+
+ queryResult = repo.tupleResults(o, queryId);
- RemoteRepository.checkResponseCode(resp);
-
- queryResult = repo.tupleResults(resp);
-
-// queryResult = parseResults(checkResponseCode(doSparqlQuery(opts)));
-
} finally {
/*
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2013-09-28 18:35:26 UTC (rev 7421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2013-09-28 19:18:59 UTC (rev 7422)
@@ -151,7 +151,7 @@
* The name of a request parameter whose value is the {@link UUID} of a
* top-level query.
*/
- public static final String QUERY_ID = "queryId";
+ private static final String QUERY_ID = "queryId";
/**
* The name of a request parameter used to cancel a running query. At least
@@ -160,7 +160,7 @@
*
* @see #QUERY_ID
*/
- public static final String CANCEL_QUERY = "cancelQuery";
+ protected static final String CANCEL_QUERY = "cancelQuery";
/**
* Request a snapshot of the journal (HA only). The snapshot will be written
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2013-09-28 18:35:26 UTC (rev 7421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2013-09-28 19:18:59 UTC (rev 7422)
@@ -99,8 +99,6 @@
import org.xml.sax.Attributes;
import org.xml.sax.ext.DefaultHandler2;
-import com.bigdata.rdf.sail.webapp.StatusServlet;
-
// Note: Do not import. Not part of the bigdata-client.jar
//
//import com.bigdata.rdf.sparql.ast.service.RemoteServiceOptions;
@@ -329,13 +327,13 @@
opts.method = "GET";
- HttpResponse response = null;
+// HttpResponse response = null;
opts.setAcceptHeader(ConnectOptions.DEFAULT_GRAPH_ACCEPT_HEADER);
- checkResponseCode(response = doConnect(opts));
+// checkResponseCode(response = doConnect(opts));
- return graphResults(response);
+ return graphResults(opts, null);
}
@@ -531,11 +529,14 @@
*/
public void cancel(final UUID queryId) throws Exception {
+ if (queryId == null)
+ return;
+
final ConnectOptions opts = newUpdateConnectOptions();
- opts.addRequestParam(StatusServlet.CANCEL_QUERY);
+ opts.addRequestParam("cancelQuery");
- opts.addRequestParam(StatusServlet.QUERY_ID, queryId.toString());
+ opts.addRequestParam("queryId", queryId.toString());
checkResponseCode(doConnect(opts));
@@ -1037,14 +1038,10 @@
@Override
public GraphQueryResult evaluate() throws Exception {
- HttpResponse response = null;
-
setupConnectOptions();
- checkResponseCode(response = doConnect(opts));
+ return graphResults(opts, getQueryId());
- return graphResults(response);
-
}
}
@@ -1072,30 +1069,34 @@
@Override
public boolean evaluate() throws Exception {
+
+ setupConnectOptions();
- HttpResponse response = null;
- try {
+ return booleanResults(opts, getQueryId());
- setupConnectOptions();
-
- checkResponseCode(response = doConnect(opts));
-
- return booleanResults(response);
-
- } finally {
-
- try {
-
- if (response != null)
- EntityUtils.consume(response.getEntity());
-
- } catch (Exception ex) {
-
- log.warn(ex);
-
- }
-
- }
+// HttpResponse response = null;
+// try {
+//
+// setupConnectOptions();
+//
+// checkResponseCode(response = doConnect(opts));
+//
+// return booleanResults(response);
+//
+// } finally {
+//
+// try {
+//
+// if (response != null)
+// EntityUtils.consume(response.getEntity());
+//
+// } catch (Exception ex) {
+//
+// log.warn(ex);
+//
+// }
+//
+// }
}
@@ -1535,90 +1536,6 @@
* @throws Exception
* If anything goes wrong.
*/
- public TupleQueryResult tupleResults(final HttpResponse response)
- throws Exception {
-
- HttpEntity entity = null;
- BackgroundTupleResult result = null;
- try {
-
- entity = response.getEntity();
-
- final String contentType = entity.getContentType().getValue();
-
- final MiniMime mimeType = new MiniMime(contentType);
-
- final TupleQueryResultFormat format = TupleQueryResultFormat
- .forMIMEType(mimeType.getMimeType());
-
- if (format == null)
- throw new IOException(
- "Could not identify format for service response: serviceURI="
- + sparqlEndpointURL + ", contentType=" + contentType
- + " : response=" + getResponseBody(response));
-
- final TupleQueryResultParserFactory parserFactory = TupleQueryResultParserRegistry
- .getInstance().get(format);
-
- if (parserFactory == null)
- throw new IOException(
- "No parser for format for service response: serviceURI="
- + sparqlEndpointURL + ", contentType=" + contentType
- + ", format=" + format + " : response="
- + getResponseBody(response));
-
- final TupleQueryResultParser parser = parserFactory.getParser();
-
- final InputStream in = entity.getContent();
-
- result = new BackgroundTupleResult(parser, in, entity);
-
- executor.execute(result);
-
- final MapBindingSet bindings = new MapBindingSet();
-
- final InsertBindingSetCursor cursor =
- new InsertBindingSetCursor(result, bindings);
-
- final List<String> list = new ArrayList<String>(
- result.getBindingNames());
-
- return new TupleQueryResultImpl(list, cursor);
-
-// final TupleQueryResultBuilder handler = new TupleQueryResultBuilder();
-//
-// parser.setTupleQueryResultHandler(handler);
-//
-// parser.parse(entity.getContent());
-//
-// // done.
-// return handler.getQueryResult();
-
- } finally {
-
-// // terminate the http connection.
-// response.disconnect();
- if (result == null) {
- try {
- EntityUtils.consume(entity);
- } catch (IOException ex) { }
- }
-
- }
-
- }
-
- /**
- * Extracts the solutions from a SPARQL query.
- *
- * @param response
- * The connection from which to read the results.
- *
- * @return The results.
- *
- * @throws Exception
- * If anything goes wrong.
- */
public TupleQueryResult tupleResults(final ConnectOptions opts, final UUID queryId)
throws Exception {
@@ -1693,20 +1610,24 @@
}
@Override
- public synchronized void close() throws QueryEvaluationException {
+ public void close() throws QueryEvaluationException {
- super.close();
+ try {
- if (notDone.compareAndSet(true, false)) {
-
- try {
- cancel(queryId);
- } catch (Exception ex) {
- throw new QueryEvaluationException(ex);
- }
-
- }
+ super.close();
+ } finally {
+
+ if (notDone.compareAndSet(true, false)) {
+
+ try {
+ cancel(queryId);
+ } catch (Exception ex) { }
+
+ }
+
+ }
+
};
};
@@ -1726,7 +1647,7 @@
// // terminate the http connection.
// response.disconnect();
- if (result == null) {
+ if (entity != null && result == null) {
try {
EntityUtils.consume(entity);
} catch (IOException ex) { }
@@ -1753,12 +1674,18 @@
* @throws Exception
* If anything goes wrong.
*/
- public GraphQueryResult graphResults(final HttpResponse response) throws Exception {
+ public GraphQueryResult graphResults(final ConnectOptions opts, final UUID queryId)
+ throws Exception {
+ HttpResponse response = null;
HttpEntity entity = null;
BackgroundGraphResult result = null;
try {
+ response = doConnect(opts);
+
+ checkResponseCode(response);
+
entity = response.getEntity();
final String baseURI = "";
@@ -1808,11 +1735,53 @@
// charset=application/rdf+xml
}
- result = new BackgroundGraphResult(
- parser, entity.getContent(), charset, baseURI, entity);
+ final BackgroundGraphResult tmp = new BackgroundGraphResult(
+ parser, entity.getContent(), charset, baseURI, entity) {
+
+ final AtomicBoolean notDone = new AtomicBoolean(true);
+
+ @Override
+ public boolean hasNext() throws QueryEvaluationException {
+
+ final boolean hasNext = super.hasNext();
+
+ if (hasNext == false) {
+
+ notDone.set(false);
+
+ }
+
+ return hasNext;
+
+ }
+
+ @Override
+ public void close() throws QueryEvaluationException {
+
+ try {
+
+ super.close();
+
+ } finally {
+
+ if (notDone.compareAndSet(true, false)) {
+
+ try {
+ cancel(queryId);
+ } catch (Exception ex) { }
+
+ }
+
+ }
+
+ };
+
+ };
- executor.execute(result);
+ executor.execute(tmp);
+ result = tmp;
+
return result;
// final Graph g = new GraphImpl();
@@ -1828,10 +1797,14 @@
// // terminate the http connection.
// response.disconnect();
- if (result == null) {
+ if (response != null && result == null) {
try {
EntityUtils.consume(entity);
} catch (IOException ex) { }
+
+ try {
+ cancel(queryId);
+ } catch (Exception ex) { }
}
}
@@ -1851,11 +1824,17 @@
* If anything goes wrong, including if the result set does not
* encode a single boolean value.
*/
- protected boolean booleanResults(final HttpResponse response) throws Exception {
+ protected boolean booleanResults(final ConnectOptions opts, final UUID queryId) throws Exception {
+ HttpResponse response = null;
HttpEntity entity = null;
+ Boolean result = null;
try {
+ response = doConnect(opts);
+
+ checkResponseCode(response);
+
entity = response.getEntity();
final String contentType = entity.getContentType().getValue();
@@ -1879,7 +1858,7 @@
final BooleanQueryResultParser parser = factory.getParser();
- final boolean result = parser.parse(entity.getContent());
+ result = parser.parse(entity.getContent());
return result;
@@ -1887,9 +1866,15 @@
// // terminate the http connection.
// response.disconnect();
- try {
- EntityUtils.consume(entity);
- } catch (IOException ex) { }
+ if (result == null) {
+ try {
+ EntityUtils.consume(entity);
+ } catch (IOException ex) { }
+
+ try {
+ cancel(queryId);
+ } catch (Exception ex) { }
+ }
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2013-09-28 18:35:26 UTC (rev 7421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2013-09-28 19:18:59 UTC (rev 7422)
@@ -171,27 +171,29 @@
opts.method = "GET";
- HttpResponse response = null;
- GraphQueryResult result = null;
+// HttpResponse response = null;
+// GraphQueryResult result = null;
opts.setAcceptHeader(ConnectOptions.DEFAULT_GRAPH_ACCEPT_HEADER);
- try {
- // check response in try.
- checkResponseCode(response = doConnect(opts));
-
- // return asynchronous parse of result.
- return result = graphResults(response);
-
- } finally {
- if (result == null) {
- // Consume entity if bad response.
- try {
- EntityUtils.consume(response.getEntity());
- } catch (IOException ex) {
- }
- }
- }
+ return graphResults(opts, null);
+
+// try {
+// // check response in try.
+// checkResponseCode(response = doConnect(opts));
+//
+// // return asynchronous parse of result.
+// return result = graphResults(response);
+//
+// } finally {
+// if (result == null) {
+// // Consume entity if bad response.
+// try {
+// EntityUtils.consume(response.getEntity());
+// } catch (IOException ex) {
+// }
+// }
+// }
}
/**
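
The common thread in this revision is that each result object now owns the lifecycle of its remote query: a queryId is sent with the request, a flag is cleared when iteration completes normally, and close() fires a best-effort cancel if the consumer abandons the result early. The following is a minimal sketch of that pattern; the types are hypothetical, and the real code applies the idea inside anonymous subclasses of BackgroundTupleResult and BackgroundGraphResult rather than a standalone class.

import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;

public abstract class CancelOnCloseResultSketch implements AutoCloseable {

    private final UUID queryId;

    // Guard so that the remote cancel fires at most once.
    private final AtomicBoolean notDone = new AtomicBoolean(true);

    protected CancelOnCloseResultSketch(final UUID queryId) {
        this.queryId = queryId;
    }

    /** Subclass hook: issue the remote cancelQuery request. */
    protected abstract void cancel(UUID queryId) throws Exception;

    /** The iteration calls this once the last solution was delivered. */
    protected void markDone() {
        notDone.set(false);
    }

    @Override
    public void close() {
        // Cancel only if the consumer abandoned the result early; a fully
        // consumed result has already cleared the flag via markDone().
        if (notDone.compareAndSet(true, false)) {
            try {
                cancel(queryId);
            } catch (Exception ex) {
                // Best effort: the query may already be done on the server.
            }
        }
    }
}

On the server side the cancel arrives at the StatusServlet as a request carrying the cancelQuery and queryId parameters, which is why RemoteServiceCallImpl now generates a fresh UUID for every service call.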
|
|
From: <jer...@us...> - 2013-10-01 20:15:07
|
Revision: 7424
http://bigdata.svn.sourceforge.net/bigdata/?rev=7424&view=rev
Author: jeremy_carroll
Date: 2013-10-01 20:14:59 +0000 (Tue, 01 Oct 2013)
Log Message:
-----------
Moved and renamed the 500 tests so that they not only check the optimization step but also run the queries.
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNoExceptions.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/filterSubselect737.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/nestedSubselectsWithUnion737.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/ticket746.rq
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTOptimizer500s.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/filterSubselect737.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/nestedSubselectsWithUnion737.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/ticket746.rq
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTOptimizer500s.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTOptimizer500s.java 2013-10-01 17:57:50 UTC (rev 7423)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTOptimizer500s.java 2013-10-01 20:14:59 UTC (rev 7424)
@@ -1,192 +0,0 @@
-/**
-
-Copyright (C) SYSTAP, LLC 2006-2013. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-/*
- * Created on Oct 1, 2013
- */
-
-package com.bigdata.rdf.sparql.ast.optimizers;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-
-import org.apache.commons.io.IOUtils;
-import org.openrdf.model.impl.URIImpl;
-import org.openrdf.model.vocabulary.RDF;
-import org.openrdf.model.vocabulary.RDFS;
-import org.openrdf.query.BindingSet;
-import org.openrdf.query.MalformedQueryException;
-import org.openrdf.query.algebra.StatementPattern.Scope;
-import org.openrdf.query.algebra.evaluation.QueryBindingSet;
-
-import com.bigdata.bop.BOpUtility;
-import com.bigdata.bop.IBindingSet;
-import com.bigdata.bop.IVariable;
-import com.bigdata.bop.Var;
-import com.bigdata.bop.bindingSet.EmptyBindingSet;
-import com.bigdata.rdf.internal.IV;
-import com.bigdata.rdf.sail.sparql.Bigdata2ASTSPARQLParser;
-import com.bigdata.rdf.sail.sparql.TestSubqueryPatterns;
-import com.bigdata.rdf.sail.sparql.ast.ParseException;
-import com.bigdata.rdf.sail.sparql.ast.TokenMgrError;
-import com.bigdata.rdf.sparql.ast.ASTContainer;
-import com.bigdata.rdf.sparql.ast.AbstractASTEvaluationTestCase;
-import com.bigdata.rdf.sparql.ast.ConstantNode;
-import com.bigdata.rdf.sparql.ast.IQueryNode;
-import com.bigdata.rdf.sparql.ast.JoinGroupNode;
-import com.bigdata.rdf.sparql.ast.NamedSubqueriesNode;
-import com.bigdata.rdf.sparql.ast.NamedSubqueryInclude;
-import com.bigdata.rdf.sparql.ast.NamedSubqueryRoot;
-import com.bigdata.rdf.sparql.ast.ProjectionNode;
-import com.bigdata.rdf.sparql.ast.QueryRoot;
-import com.bigdata.rdf.sparql.ast.QueryType;
-import com.bigdata.rdf.sparql.ast.StatementPatternNode;
-import com.bigdata.rdf.sparql.ast.StaticAnalysis;
-import com.bigdata.rdf.sparql.ast.SubqueryRoot;
-import com.bigdata.rdf.sparql.ast.ValueExpressionNode;
-import com.bigdata.rdf.sparql.ast.VarNode;
-import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext;
-import com.bigdata.rdf.vocab.decls.FOAFVocabularyDecl;
-
-/**
- * This test suite is for trac items where the failure mode is a 500 error caused
- * by a software error in the static optimizer.
- *
- * The tests each consist of a test query in a file in this package.
- * The typical test succeeds if the optimizers run on this query without a disaster.
- * This test suite does NOT have either of the following objectives:
- * - that the static optimizer is correct in the sense that the optimized query has the same meaning as the original query
- * or
- * - an optimizer in the sense that the optimized query is likely to be faster than the original query.
- *
- * The very limited goal is that no uncaught exceptions are thrown!
- *
- */
-public class TestASTOptimizer500s extends
- AbstractASTEvaluationTestCase {
-
- /**
- *
- */
- public TestASTOptimizer500s() {
- }
-
- /**
- * @param name
- */
- public TestASTOptimizer500s(String name) {
- super(name);
- }
-
-
- /**
- * Unit test for WITH {subquery} AS "name" and INCLUDE. The WITH must be in
- * the top-level query.
- *
- * This is specifically for Trac 746 which crashed out during optimize.
- * So the test simply runs that far, and does not verify anything
- * other than the ability to optimize without an exception
- * @throws IOException
- */
- public void test_namedSubquery746() throws MalformedQueryException,
- TokenMgrError, ParseException, IOException {
- optimizeQuery("ticket746");
-
- }
-
-/**
- * <pre>
-SELECT *
-{ { SELECT * { ?s ?p ?o } LIMIT 1 }
- FILTER ( ?s = <eg:a> )
-}
- </pre>
- * @throws MalformedQueryException
- * @throws TokenMgrError
- * @throws ParseException
- * @throws IOException
- */
- public void test_filterSubselect737() throws MalformedQueryException,
- TokenMgrError, ParseException, IOException {
- optimizeQuery("filterSubselect737");
-
- }
-
-
-/**
- * <pre>
-SELECT *
-WHERE {
-
- { FILTER ( false ) }
- UNION
- {
- { SELECT ?Subject_A
- WHERE {
- { SELECT $j__5 ?Subject_A
- {
- } ORDER BY $j__5
- }
- } GROUP BY ?Subject_A
- }
- }
- OPTIONAL {
- { SELECT ?Subject_A
- WHERE {
- { SELECT $j__8 ?Subject_A
- {
-
- } ORDER BY $j_8
- }
- } GROUP BY ?Subject_A
- }
- }
-}
- </pre>
- * @throws MalformedQueryException
- * @throws TokenMgrError
- * @throws ParseException
- * @throws IOException
- */
- public void test_nestedSubselectsWithUnion737() throws MalformedQueryException,
- TokenMgrError, ParseException, IOException {
- optimizeQuery("nestedSubselectsWithUnion737");
-
- }
-
- void optimizeQuery(final String queryfile) throws IOException, MalformedQueryException {
- final String sparql = IOUtils.toString(getClass().getResourceAsStream(queryfile+".rq"));
-
-
- final QueryRoot ast = new Bigdata2ASTSPARQLParser(store).parseQuery2(sparql,baseURI).getOriginalAST();
-
- final IASTOptimizer rewriter = new DefaultOptimizerList();
-
- final AST2BOpContext context = new AST2BOpContext(new ASTContainer(ast), store);
- rewriter.optimize(context, ast/* queryNode */, new IBindingSet[]{EmptyBindingSet.INSTANCE});
- }
-
-}
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/filterSubselect737.rq
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/filterSubselect737.rq 2013-10-01 17:57:50 UTC (rev 7423)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/filterSubselect737.rq 2013-10-01 20:14:59 UTC (rev 7424)
@@ -1,4 +0,0 @@
-SELECT *
-{ { SELECT * { ?s ?p ?o } LIMIT 1 }
- FILTER ( ?s = <eg:a> )
-}
\ No newline at end of file
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/nestedSubselectsWithUnion737.rq
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/nestedSubselectsWithUnion737.rq 2013-10-01 17:57:50 UTC (rev 7423)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/nestedSubselectsWithUnion737.rq 2013-10-01 20:14:59 UTC (rev 7424)
@@ -1,27 +0,0 @@
-SELECT *
-WHERE {
-
- { FILTER ( false ) }
- UNION
- {
- { SELECT ?Subject_A
- WHERE {
- { SELECT $j__5 ?Subject_A
- {
- } ORDER BY $j__5
- }
- } GROUP BY ?Subject_A
- }
- }
- OPTIONAL {
- { SELECT ?Subject_A
- WHERE {
- { SELECT $j__8 ?Subject_A
- {
-
- } ORDER BY $j_8
- }
- } GROUP BY ?Subject_A
- }
- }
-}
\ No newline at end of file
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/ticket746.rq
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/ticket746.rq 2013-10-01 17:57:50 UTC (rev 7423)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/ticket746.rq 2013-10-01 20:14:59 UTC (rev 7424)
@@ -1,35 +0,0 @@
-base <http://example.org/>
-prefix sys: </bdm/api/kbobject/sys:>
-prefix base: </bdm/api/kbobject/base:>
-prefix syapse: </graph/syapse#>
-prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
-prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
-
-SELECT *
-
-WITH {
-SELECT *
-
-WHERE {
-
- OPTIONAL {
- base:disease syapse:hasLiteralProperty $j2 .
- ?Sample_A $j2 ?j1
- }
- OPTIONAL {
- base:species syapse:hasLiteralProperty $j4 .
- ?Sample_A $j4 ?j3
- }
- OPTIONAL {
- ?Sample_A sys:name ?j5
- }
- ?Sample_A rdf:type / rdfs:subClassOf * base:MammalianCellLineSample
-}
-
-} AS %__UserQuery
-
-WHERE {
-
-INCLUDE %__UserQuery
-
-}
Copied: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNoExceptions.java (from rev 7423, branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTOptimizer500s.java)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNoExceptions.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNoExceptions.java 2013-10-01 20:14:59 UTC (rev 7424)
@@ -0,0 +1,272 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2013. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on Oct 1, 2013
+ */
+
+package com.bigdata.rdf.sail;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.Properties;
+
+import org.apache.commons.io.IOUtils;
+import org.openrdf.model.ValueFactory;
+import org.openrdf.model.impl.URIImpl;
+import org.openrdf.model.vocabulary.RDF;
+import org.openrdf.model.vocabulary.RDFS;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.MalformedQueryException;
+import org.openrdf.query.QueryEvaluationException;
+import org.openrdf.query.QueryLanguage;
+import org.openrdf.query.TupleQuery;
+import org.openrdf.query.TupleQueryResult;
+import org.openrdf.query.algebra.StatementPattern.Scope;
+import org.openrdf.query.algebra.evaluation.QueryBindingSet;
+import org.openrdf.repository.RepositoryConnection;
+import org.openrdf.repository.RepositoryException;
+import org.openrdf.repository.sail.SailRepository;
+import org.openrdf.rio.RDFHandlerException;
+import org.openrdf.rio.RDFParseException;
+import org.openrdf.sail.memory.MemoryStore;
+
+import com.bigdata.bop.BOpUtility;
+import com.bigdata.bop.IBindingSet;
+import com.bigdata.bop.IVariable;
+import com.bigdata.bop.Var;
+import com.bigdata.bop.bindingSet.EmptyBindingSet;
+import com.bigdata.rdf.axioms.NoAxioms;
+import com.bigdata.rdf.internal.IV;
+import com.bigdata.rdf.sail.sparql.Bigdata2ASTSPARQLParser;
+import com.bigdata.rdf.sail.sparql.TestSubqueryPatterns;
+import com.bigdata.rdf.sail.sparql.ast.ParseException;
+import com.bigdata.rdf.sail.sparql.ast.TokenMgrError;
+import com.bigdata.rdf.sparql.ast.ASTContainer;
+import com.bigdata.rdf.sparql.ast.AbstractASTEvaluationTestCase;
+import com.bigdata.rdf.sparql.ast.ConstantNode;
+import com.bigdata.rdf.sparql.ast.IQueryNode;
+import com.bigdata.rdf.sparql.ast.JoinGroupNode;
+import com.bigdata.rdf.sparql.ast.NamedSubqueriesNode;
+import com.bigdata.rdf.sparql.ast.NamedSubqueryInclude;
+import com.bigdata.rdf.sparql.ast.NamedSubqueryRoot;
+import com.bigdata.rdf.sparql.ast.ProjectionNode;
+import com.bigdata.rdf.sparql.ast.QueryRoot;
+import com.bigdata.rdf.sparql.ast.QueryType;
+import com.bigdata.rdf.sparql.ast.StatementPatternNode;
+import com.bigdata.rdf.sparql.ast.StaticAnalysis;
+import com.bigdata.rdf.sparql.ast.SubqueryRoot;
+import com.bigdata.rdf.sparql.ast.ValueExpressionNode;
+import com.bigdata.rdf.sparql.ast.VarNode;
+import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext;
+import com.bigdata.rdf.sparql.ast.optimizers.DefaultOptimizerList;
+import com.bigdata.rdf.sparql.ast.optimizers.IASTOptimizer;
+import com.bigdata.rdf.vocab.NoVocabulary;
+import com.bigdata.rdf.vocab.decls.FOAFVocabularyDecl;
+
+/**
+ * This test suite is for trac items where the failure mode is a 500 error caused
+ * by a software error, often in the static optimizer.
+ *
+ * The tests each consist of a test query in a file in this package.
+ * The typical test succeeds if the optimizers run on this query without a disaster.
+ * This test suite does NOT have either of the following objectives:
+ * - that the static optimizer is correct, in the sense that the optimized query has the same meaning as the original query,
+ * or
+ * - that the optimizer is effective, in the sense that the optimized query is likely to be faster than the original query.
+ *
+ * The very limited goal is that no uncaught exceptions are thrown!
+ *
+ */
+public class TestNoExceptions extends
+ QuadsTestCase {
+
+ /**
+ *
+ */
+ public TestNoExceptions() {
+ }
+
+ /**
+ * @param name
+ */
+ public TestNoExceptions(String name) {
+ super(name);
+ }
+
+ public AbstractBigdataSailTestCase getOurDelegate() {
+
+ if (getDelegate() == null) {
+
+ String testClass = System.getProperty("testClass");
+ if (testClass != null) {
+ return super.getOurDelegate();
+
+ }
+ setDelegate(new com.bigdata.rdf.sail.TestBigdataSailWithQuads());
+ }
+ return (AbstractBigdataSailTestCase) super.getDelegate();
+ }
+
+ /**
+ * Please set your database properties here, except for your journal file,
+ * please DO NOT SPECIFY A JOURNAL FILE.
+ */
+ @Override
+ public Properties getProperties() {
+
+ final Properties props = super.getProperties();
+
+ /*
+ * For example, here is a set of five properties that turns off
+ * inference, truth maintenance, and the free text index.
+ */
+ props.setProperty(BigdataSail.Options.AXIOMS_CLASS,
+ NoAxioms.class.getName());
+ props.setProperty(BigdataSail.Options.VOCABULARY_CLASS,
+ NoVocabulary.class.getName());
+ props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false");
+ props.setProperty(BigdataSail.Options.JUSTIFY, "false");
+// props.setProperty(BigdataSail.Options.INLINE_DATE_TIMES, "true");
+// props.setProperty(BigdataSail.Options.ISOLATABLE_INDICES, "true");
+// props.setProperty(BigdataSail.Options.EXACT_SIZE, "true");
+// props.setProperty(BigdataSail.Options.ALLOW_SESAME_QUERY_EVALUATION,
+// "false");
+ props.setProperty(BigdataSail.Options.STATEMENT_IDENTIFIERS, "false");
+
+ return props;
+
+ }
+ /**
+ * Unit test for WITH {subquery} AS "name" and INCLUDE. The WITH must be in
+ * the top-level query.
+ *
+ * This is specifically for Trac 746, which crashed during the optimize
+ * step. The test simply runs that far and does not verify anything
+ * other than the ability to optimize without an exception.
+ * @throws IOException
+ */
+ public void test_namedSubquery746() throws Exception,
+ TokenMgrError, ParseException, IOException {
+ optimizeQuery("ticket746");
+
+ }
+
+/**
+ * <pre>
+SELECT *
+{ { SELECT * { ?s ?p ?o } LIMIT 1 }
+ FILTER ( ?s = <eg:a> )
+}
+ </pre>
+ * @throws MalformedQueryException
+ * @throws TokenMgrError
+ * @throws ParseException
+ * @throws IOException
+ */
+ public void test_filterSubselect737() throws Exception,
+ TokenMgrError, ParseException, IOException {
+ optimizeQuery("filterSubselect737");
+
+ }
+
+
+/**
+ * <pre>
+SELECT *
+WHERE {
+
+ { FILTER ( false ) }
+ UNION
+ {
+ { SELECT ?Subject_A
+ WHERE {
+ { SELECT $j__5 ?Subject_A
+ {
+ } ORDER BY $j__5
+ }
+ } GROUP BY ?Subject_A
+ }
+ }
+ OPTIONAL {
+ { SELECT ?Subject_A
+ WHERE {
+ { SELECT $j__8 ?Subject_A
+ {
+
+ } ORDER BY $j_8
+ }
+ } GROUP BY ?Subject_A
+ }
+ }
+}
+ </pre>
+ * @throws MalformedQueryException
+ * @throws TokenMgrError
+ * @throws ParseException
+ * @throws IOException
+ */
+ public void test_nestedSubselectsWithUnion737() throws Exception,
+ TokenMgrError, ParseException, IOException {
+ optimizeQuery("nestedSubselectsWithUnion737");
+
+ }
+
+ void optimizeQuery(final String queryfile) throws Exception {
+ final String sparql = IOUtils.toString(getClass().getResourceAsStream(queryfile+".rq"));
+ // try with Bigdata:
+ final BigdataSail sail = getSail();
+ try {
+ executeQuery(new BigdataSailRepository(sail),sparql);
+ } finally {
+ sail.__tearDownUnitTest();
+ }
+
+ }
+
+ private void executeQuery(final SailRepository repo, final String query)
+ throws RepositoryException, MalformedQueryException,
+ QueryEvaluationException, RDFParseException, IOException,
+ RDFHandlerException {
+ try {
+ repo.initialize();
+ final RepositoryConnection conn = repo.getConnection();
+ conn.setAutoCommit(false);
+ try {
+ final ValueFactory vf = conn.getValueFactory();
+ conn.commit();
+ TupleQuery tq = conn.prepareTupleQuery(QueryLanguage.SPARQL, query);
+ TupleQueryResult tqr = tq.evaluate();
+ tqr.close();
+ } finally {
+ conn.close();
+ }
+ } finally {
+ repo.shutDown();
+ }
+ }
+
+}
Copied: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/filterSubselect737.rq (from rev 7423, branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/filterSubselect737.rq)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/filterSubselect737.rq (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/filterSubselect737.rq 2013-10-01 20:14:59 UTC (rev 7424)
@@ -0,0 +1,4 @@
+SELECT *
+{ { SELECT * { ?s ?p ?o } LIMIT 1 }
+ FILTER ( ?s = <eg:a> )
+}
\ No newline at end of file
Copied: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/nestedSubselectsWithUnion737.rq (from rev 7423, branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/nestedSubselectsWithUnion737.rq)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/nestedSubselectsWithUnion737.rq (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/nestedSubselectsWithUnion737.rq 2013-10-01 20:14:59 UTC (rev 7424)
@@ -0,0 +1,27 @@
+SELECT *
+WHERE {
+
+ { FILTER ( false ) }
+ UNION
+ {
+ { SELECT ?Subject_A
+ WHERE {
+ { SELECT $j__5 ?Subject_A
+ {
+ } ORDER BY $j__5
+ }
+ } GROUP BY ?Subject_A
+ }
+ }
+ OPTIONAL {
+ { SELECT ?Subject_A
+ WHERE {
+ { SELECT $j__8 ?Subject_A
+ {
+
+ } ORDER BY $j_8
+ }
+ } GROUP BY ?Subject_A
+ }
+ }
+}
\ No newline at end of file
Copied: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/ticket746.rq (from rev 7423, branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/ticket746.rq)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/ticket746.rq (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/ticket746.rq 2013-10-01 20:14:59 UTC (rev 7424)
@@ -0,0 +1,35 @@
+base <http://example.org/>
+prefix sys: </bdm/api/kbobject/sys:>
+prefix base: </bdm/api/kbobject/base:>
+prefix syapse: </graph/syapse#>
+prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
+prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+
+SELECT *
+
+WITH {
+SELECT *
+
+WHERE {
+
+ OPTIONAL {
+ base:disease syapse:hasLiteralProperty $j2 .
+ ?Sample_A $j2 ?j1
+ }
+ OPTIONAL {
+ base:species syapse:hasLiteralProperty $j4 .
+ ?Sample_A $j4 ?j3
+ }
+ OPTIONAL {
+ ?Sample_A sys:name ?j5
+ }
+ ?Sample_A rdf:type / rdfs:subClassOf * base:MammalianCellLineSample
+}
+
+} AS %__UserQuery
+
+WHERE {
+
+INCLUDE %__UserQuery
+
+}
|
|
From: <tho...@us...> - 2013-10-11 13:50:25
|
Revision: 7447
http://bigdata.svn.sourceforge.net/bigdata/?rev=7447&view=rev
Author: thompsonbry
Date: 2013-10-11 13:50:11 +0000 (Fri, 11 Oct 2013)
Log Message:
-----------
Checkpoint providing resolution for #723 (HA asynchronous tasks must be canceled when invariants are changed) and #718 (HAJournalServer needs to handle ZK client connection loss).
Some remaining todos were identified while working these tickets:
For #723, we want to add an assertLeader() invariant and then use this in sendHALog() and sendHAStore() to guard an additional invariant.
Also for #723, we want to add an invariant that the zk client is connected. This will need to hook the QuorumClient.disconnected() method.
For #718, we are not aware of any problems. However, we need to provide more test coverage. We will add a bounce2() and bounce3() test. We will also try to parameterize the test suite for tests that currently do a service shutdown and restart with an enum indicating whether we should do a service shutdown, sudden kill, or dropZookeeper for the service(s) that get shut down in our failover test suite.
AbstractQuorum.interruptAll() should be invoked from the ErrorTask. This will interrupt any in-flight requests for the actor to accomplish a quorum state transition. If the actor attempts to cause two different state transitions, this can lead to a deadlock since one of the expected conditions might not be achieved. This situation has been observed before for AbstractQuorum.terminate().
AbstractQuorum.getClient() has been observed to block. It will be rewritten to use a non-blocking access method as this is currently causing contention that is not appropriate, e.g., for getExtendedRunState() versus doServiceLeave().
We will add an invariant listener to the digest utility to ensure that the locks are released if the client computing the digest is disconnected from zookeeper.
We have updated the zookeeper dependency to 3.4.5.
See #723 (HA asynchronous tasks must be canceled when invariants are changed)
See #718 (HAJournalServer needs to handle ZK client connection loss)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/.classpath
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Journal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorumClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DumpLogDigests.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/zookeeper/ZooKeeperAccessor.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java
branches/BIGDATA_RELEASE_1_3_0/build.properties
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/concurrent/FutureTaskInvariantMon.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/lib/apache/zookeeper-3.4.5.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/InvariantTask.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3InvariantListener.java
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/lib/apache/zookeeper-3.3.3.jar
Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.classpath 2013-10-10 17:18:39 UTC (rev 7446)
+++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2013-10-11 13:50:11 UTC (rev 7447)
@@ -33,7 +33,7 @@
<classpathentry kind="src" path="bigdata-gas/src/test"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/dsi-utils-1.0.6-020610.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/lgpl-utils-1.0.6-020610.jar"/>
- <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/apache/zookeeper-3.3.3.jar"/>
+ <classpathentry kind="lib" path="bigdata-jini/lib/apache/zookeeper-3.4.5.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-continuation-7.2.2.v20101205.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-http-7.2.2.v20101205.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-io-7.2.2.v20101205.jar"/>
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/concurrent/FutureTaskInvariantMon.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/concurrent/FutureTaskInvariantMon.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/concurrent/FutureTaskInvariantMon.java 2013-10-11 13:50:11 UTC (rev 7447)
@@ -0,0 +1,302 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.concurrent;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
+
+import org.apache.log4j.Logger;
+
+import com.bigdata.ha.HAGlue;
+import com.bigdata.ha.QuorumService;
+import com.bigdata.quorum.Quorum;
+import com.bigdata.quorum.QuorumEvent;
+import com.bigdata.quorum.QuorumEventEnum;
+import com.bigdata.quorum.QuorumListener;
+import com.bigdata.util.StackInfoReport;
+
+/**
+ * A {@link Future} that allows you to cancel a computation if an invariant is
+ * violated. This class is specifically designed to monitor quorum related
+ * invariants for HA.
+ * <p>
+ * Once an invariant is established, listening for the relevant quorum events
+ * commences and a check is made to verify that the invariant holds on entry.
+ * This pattern ensures there is no possibility of a missed event.
+ * <p>
+ * This {@link FutureTask} wrapper will return the value of the {@link Callable}
+ * (or the specified result for the {@link Runnable}) iff the task completes
+ * successfully without the invariant being violated.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ * @param <T>
+ * The generic type of the future.
+ */
+public abstract class FutureTaskInvariantMon<T> extends FutureTaskMon<T>
+ implements QuorumListener {
+
+ private static final Logger log = Logger.getLogger(FutureTaskInvariantMon.class);
+
+ private final Quorum<HAGlue, QuorumService<HAGlue>> m_quorum;
+
+ private final List<QuorumEventInvariant> m_triggers = new CopyOnWriteArrayList<QuorumEventInvariant>();
+
+ public FutureTaskInvariantMon(final Callable<T> callable,
+ final Quorum<HAGlue, QuorumService<HAGlue>> quorum) {
+
+ super(callable);
+
+ if (quorum == null)
+ throw new IllegalArgumentException();
+
+ m_quorum = quorum;
+
+ }
+
+ public FutureTaskInvariantMon(final Runnable runnable, final T result,
+ Quorum<HAGlue, QuorumService<HAGlue>> quorum) {
+
+ super(runnable, result);
+
+ if (quorum == null)
+ throw new IllegalArgumentException();
+
+ m_quorum = quorum;
+
+ }
+
+ /**
+ * Concrete implementations use this callback hook to establish the
+ * invariants to be monitored.
+ */
+ abstract protected void establishInvariants();
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Hook to manage listener registration and establish invariants.
+ */
+ @Override
+ public void run() {
+ m_quorum.addListener(this);
+ try {
+ establishInvariants();
+
+ super.run();
+ } finally {
+ m_quorum.removeListener(this);
+ }
+ }
+
+ /**
+ * Establish an invariant that the specified service is a member of the
+ * quorum.
+ *
+ * @param serviceId
+ * The service.
+ */
+ public void assertMember(final UUID serviceId) {
+ m_triggers.add(new QuorumEventInvariant(QuorumEventEnum.MEMBER_REMOVE,
+ serviceId));
+
+ // now check that already a member and break if not
+ assertMembership(m_quorum.getMembers(), serviceId);
+ }
+
+ /**
+ * Establish an invariant that the specified service is joined with the met
+ * quorum.
+ *
+ * @param serviceId
+ * The service.
+ */
+ public void assertJoined(final UUID serviceId) {
+ m_triggers.add(new QuorumEventInvariant(QuorumEventEnum.SERVICE_LEAVE,
+ serviceId));
+
+ // now check that already joined and break if not
+ assertMembership(m_quorum.getJoined(), serviceId);
+ }
+
+ /**
+ * Establish an invariant that the specified service is not joined with
+ * the met quorum.
+ *
+ * @param serviceId
+ * The service.
+ */
+ public void assertNotJoined(final UUID serviceId) {
+ m_triggers.add(new QuorumEventInvariant(QuorumEventEnum.SERVICE_JOIN,
+ serviceId));
+
+ // now check not already joined and break if it is
+ if (isMember(m_quorum.getJoined(), serviceId))
+ broken();
+ }
+
+ /**
+ * Establish an invariant that the specified service is in the quorum
+ * pipeline.
+ *
+ * @param serviceId
+ * The service.
+ */
+ public void assertInPipeline(final UUID serviceId) {
+ m_triggers.add(new QuorumEventInvariant(
+ QuorumEventEnum.PIPELINE_REMOVE, serviceId));
+
+ // now check that already in pipeline and break if not
+ assertMembership(m_quorum.getPipeline(), serviceId);
+ }
+
+ /**
+ * Establish an invariant that the quorum is met.
+ */
+ public void assertQuorumMet() {
+ m_triggers.add(new QuorumEventInvariant(QuorumEventEnum.QUORUM_BROKE,
+ null/* serviceId */));
+
+ // now check that quorum is met and break if not
+ if (!m_quorum.isQuorumMet())
+ broken();
+ }
+
+ /**
+ * Establish an invariant that the quorum is fully met.
+ */
+ public void assertQuorumFullyMet() {
+ // no-one must leave!
+ m_triggers.add(new QuorumEventInvariant(QuorumEventEnum.SERVICE_LEAVE,
+ null/* serviceId */));
+
+ // now check that quorum is fully met on the current token and break if not
+ if (!m_quorum.isQuorumFullyMet(m_quorum.token()))
+ broken();
+ }
+
+ private void assertMembership(final UUID[] members, final UUID serviceId) {
+ if (isMember(members, serviceId))
+ return;
+
+ broken();
+ }
+
+ private boolean isMember(final UUID[] members, final UUID serviceId) {
+ for (UUID member : members) {
+ if (member.equals(serviceId))
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
+ * Any QuorumEvent must be checked to see if it matches an invariant trigger.
+ */
+ @Override
+ public void notify(final QuorumEvent e) {
+ boolean interrupt = false;
+
+ for (QuorumEventInvariant inv : m_triggers) {
+ if (inv.matches(e)) {
+ interrupt = true;
+ break;
+ }
+ }
+
+ // interrupt the call thread
+ if (interrupt) {
+ broken();
+ } else {
+ if (log.isDebugEnabled())
+ log.debug("Ignoring event: " + e);
+ }
+ }
+
+ private void broken() {
+ log.warn("BROKEN", new StackInfoReport());
+
+ cancel(true/*mayInterruptIfRunning*/);
+ }
+
+ @SuppressWarnings("serial")
+ private class QuorumEventInvariant implements QuorumEvent, Serializable {
+
+ private final QuorumEventEnum m_qe;
+ private final UUID m_sid;
+
+ /**
+ *
+ * @param qe
+ * The {@link QuorumEvent} type (required).
+ * @param sid
+ * The service {@link UUID} (optional). When <code>null</code>
+ * the {@link QuorumEventEnum} will be matched for ANY service
+ * {@link UUID}.
+ */
+ public QuorumEventInvariant(final QuorumEventEnum qe, final UUID sid) {
+ if (qe == null)
+ throw new IllegalArgumentException();
+ m_qe = qe;
+ m_sid = sid;
+ }
+
+ @Override
+ public QuorumEventEnum getEventType() {
+ return m_qe;
+ }
+
+ @Override
+ public long lastValidToken() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long token() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public UUID getServiceId() {
+ return m_sid;
+ }
+
+ @Override
+ public long lastCommitTime() {
+ throw new UnsupportedOperationException();
+ }
+
+ public boolean matches(final QuorumEvent qe) {
+ return qe.getEventType() == m_qe
+ && (m_sid == null || m_sid.equals(qe.getServiceId()));
+ }
+
+ }
+
+}
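A minimal usage sketch for the class above (the executor, callable, and service UUID are placeholders; only the FutureTaskInvariantMon API shown in the diff is real):

    final FutureTask<Void> ft = new FutureTaskInvariantMon<Void>(task, quorum) {
        @Override
        protected void establishInvariants() {
            // Trip (cancel the task) if the quorum breaks or the service leaves.
            assertQuorumMet();
            assertJoined(serviceId);
        }
    };
    executor.execute(ft);
    ft.get(); // throws CancellationException if an invariant was violated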
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2013-10-10 17:18:39 UTC (rev 7446)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2013-10-11 13:50:11 UTC (rev 7447)
@@ -1,2095 +1,2133 @@
-/**
-
-Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.ha;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.util.Iterator;
-import java.util.UUID;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RunnableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.log4j.Logger;
-
-import com.bigdata.ha.msg.HAWriteMessageBase;
-import com.bigdata.ha.msg.IHALogRequest;
-import com.bigdata.ha.msg.IHAMessage;
-import com.bigdata.ha.msg.IHASyncRequest;
-import com.bigdata.ha.msg.IHAWriteMessage;
-import com.bigdata.ha.pipeline.HAReceiveService;
-import com.bigdata.ha.pipeline.HAReceiveService.IHAReceiveCallback;
-import com.bigdata.ha.pipeline.HASendService;
-import com.bigdata.io.DirectBufferPool;
-import com.bigdata.io.IBufferAccess;
-import com.bigdata.quorum.QCE;
-import com.bigdata.quorum.Quorum;
-import com.bigdata.quorum.QuorumException;
-import com.bigdata.quorum.QuorumMember;
-import com.bigdata.quorum.QuorumStateChangeEvent;
-import com.bigdata.quorum.QuorumStateChangeEventEnum;
-import com.bigdata.quorum.QuorumStateChangeListener;
-import com.bigdata.quorum.QuorumStateChangeListenerBase;
-import com.bigdata.util.InnerCause;
-
-/**
- * {@link QuorumPipeline} implementation.
- * <p>
- * The {@link QuorumMember} must pass along the "pipeline" messages, including:
- * <ul>
- * <li>{@link QuorumMember#pipelineAdd()}</li>
- * <li>{@link QuorumMember#pipelineRemove()}</li>
- * <li>{@link QuorumMember#pipelineChange(UUID, UUID)}</li>
- * </ul>
- * When a quorum is met, the <i>leader</i> is always first in the write pipeline
- * since it is the node which receives writes from clients. When a service joins
- * the write pipeline, it always does so at the end of the chain. Services may
- * enter the write pipeline before joining a quorum in order to synchronize with
- * the quorum. If a service in the middle of the chain leaves the pipeline, then
- * the upstream node will reconfigure and retransmit the current cache block to
- * its new downstream node. This prevents nodes which are "bouncing" during
- * synchronization from causing write sets to be discarded. However, if the
- * leader leaves the write pipeline, then the quorum is broken and the write set
- * will be discarded.
- * <p>
- * Since the write pipeline is used to synchronize services trying to join the
- * quorum as well as the replicate writes for services joined with the quorum,
- * {@link HAReceiveService} may be live for a met quorum even though the
- * {@link QuorumMember} on whose behalf this class is acting is not joined with
- * the met quorum.
- *
- * <h3>Pipeline maintenance</h3>
- *
- * There are three broad categories which have to be handled: (1) leader leaves;
- * (2) pipeline leader election; and (3) follower leaves. A leader leave causes
- * the quorum to break, which will cause service leaves and pipeline leaves for
- * all joined services. However, services must add themselves to the pipeline
- * before they join the quorum and the pipeline will be reorganized if necessary
- * when the quorum leader is elected. This will result in a
- * {@link #pipelineElectedLeader()} event. A follower leave only causes the
- * follower to leave the pipeline and results in a
- * {@link #pipelineChange(UUID, UUID)} event.
- * <p>
- * There are two cases for a follower leave: (A) when the follower did not
- * have a downstream node; and (B) when there is a downstream node. For (B),
- * the upstream node from the left follower should reconfigure for the new
- * downstream node and retransmit the current cache block and the event should
- * be otherwise unnoticed.
- * <p>
- * Handling a follower join requires us to synchronize the follower first which
- * requires some more infrastructure and should be done as part of the HA
- * synchronization test suite.
- * <p>
- * What follows is an example of how events will arrive for a quorum of three
- * services: A, B, and C.
- *
- * <pre>
- * A.getActor().pipelineAdd() => A.pipelineAdd()
- * B.getActor().pipelineAdd() => B.pipelineAdd(); A.pipelineChange(null,B);
- * C.getActor().pipelineAdd() => C.pipelineAdd(); B.pipelineChange(null,C);
- * </pre>
- *
- * At this point the pipeline order is <code>[A,B,C]</code>. Notice that the
- * {@link HASendService} for A is not established until the
- * <code>A.pipelineChange(null,B)</code> sets B as the new downstream service
- * for A. Likewise, B will not relay to C until it handles the
- * <code>B.pipelineChange(null,C)</code> event.
- *
- * <p>
- *
- * Given the pipeline order <code>[A,B,C]</code>, if B were to leave, then the
- * events would be:
- *
- * <pre>
- * B.getActor().pipelineRemove() => B.pipelineRemove(); A.pipelineChange(B,C);
- * </pre>
- *
- * and when this class handles the <code>A.pipelineChange(B,C)</code> event, it
- * must update the {@link HAReceiveService} such that it now relays data to C.
- *
- * <p>
- *
- * On the other hand, given the pipeline order <code>[A,B,C]</code>, if C were
- * to leave the events would be:
- *
- * <pre>
- * C.getActor().pipelineRemove() => C.pipelineRemove(); B.pipelineChange(C,null);
- * </pre>
- *
- * and when this class handles the <code>B.pipelineChange(C,null)</code> event,
- * it must update the C's {@link HAReceiveService} such that it continues to
- * receive data, but no longer relays data to a downstream service.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @param <S>
- *
- * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/681" >
- * HAJournalServer deadlock: pipelineRemove() and getLeaderId() </a>
- */
-abstract public class QuorumPipelineImpl<S extends HAPipelineGlue> /*extends
- QuorumStateChangeListenerBase */implements QuorumPipeline<S>,
- QuorumStateChangeListener {
-
- static private transient final Logger log = Logger
- .getLogger(QuorumPipelineImpl.class);
-
- /**
- * The timeouts for a sleep before the next retry. These timeouts are
- * designed to allow some asynchronous processes to reconnect the
- * {@link HASendService} and the {@link HAReceiveService}s in write pipeline
- * such that a retransmit can succeed after a service has left the pipeline.
- * Depending on the nature of the error (i.e., a transient network problem
- * versus a pipeline reorganization), this can involve a number of zookeeper
- * events. Hence the sleep latency is backed off through this array of
- * values.
- *
- * TODO We do not want to induce too much latency here. It would be nice if
- * we automatically retried after each relevant quorum event that might cure
- * the problem as well as after a timeout. This would require a Condition
- * that we await with a timeout and signaling the Condition if there are any
- * relevant events (probably once we handle them locally).
- */
- static protected final int RETRY_SLEEP[] = new int[] { 100, 200, 200, 500, 500, 1000 };
-
- /**
- * The {@link QuorumMember}.
- */
- protected final QuorumMember<S> member;
-
- /**
- * The service {@link UUID} for the {@link QuorumMember}.
- */
- protected final UUID serviceId;
-
- /**
- * Lock managing the various mutable aspects of the pipeline state.
- */
- private final ReentrantLock lock = new ReentrantLock();
-
- /** send service (iff this is the leader). */
- private HASendService sendService;
-
- /**
- * The receive service (iff this is a follower in a met quorum).
- */
- private HAReceiveService<HAMessageWrapper> receiveService;
-
- /**
- * The buffer used to relay the data. This is only allocated for a
- * follower.
- */
- private IBufferAccess receiveBuffer;
-
- /**
- * Cached metadata about the downstream service.
- */
- private final AtomicReference<PipelineState<S>> pipelineStateRef = new AtomicReference<PipelineState<S>>();
-
- /**
- * Inner class does the actual work once to handle an event.
- */
- private final InnerEventHandler innerEventHandler = new InnerEventHandler();
-
- /**
- * Core implementation of the handler for the various events. Always run
- * while holding the {@link #lock}.
- *
- * @author <a href="mailto:tho...@us...">Bryan
- * Thompson</a>
- *
- * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/681" >
- * HAJournalServer deadlock: pipelineRemove() and getLeaderId() </a>
- */
- private final class InnerEventHandler extends QuorumStateChangeListenerBase {
-
- /**
- * A queue of events that can only be handled when a write replication
- * operation owns the {@link QuorumPipelineImpl#lock}.
- *
- * @see QuorumPipelineImpl#lock()
- * @see #dispatchEvents()
- */
- private final BlockingQueue<QuorumStateChangeEvent> queue = new LinkedBlockingQueue<QuorumStateChangeEvent>();
-
- protected InnerEventHandler() {
-
- }
-
- /**
- * Enqueue an event.
- *
- * @param e
- * The event.
- */
- private void queue(final QuorumStateChangeEvent e) {
-
- if (log.isInfoEnabled())
- log.info("Adding StateChange: " + e);
-
- queue.add(e);
-
- }
-
- /**
- * Boolean controls whether or not event elision is used. See below.
- *
- * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/681" >
- * HAJournalServer deadlock: pipelineRemove() and getLeaderId()
- * </a>
- */
- static private final boolean s_eventElission = true;
-
- /**
- * Event elision endeavours to ensure that the events processed
- * represent the current state change.
- *
- * This is best explained with an example from its original usage
- * in processing graphic events. Whilst a "button click" is a singular
- * event and all button clicks should be processed, a "mouse move" event
- * could be elided with the next "mouse move" event. Thus the move events
- * (L1 -> L2) and (L2 -> L3) would elide to a single (L1 -> L3).
- *
- * In HA, RMI calls can trigger event processing, whilst other threads monitor
- * state changes, such as open sockets. Without elision, monitoring threads
- * will observe unnecessary transitional state changes. HOWEVER, there remains
- * a problem with this pattern of synchronization.
- */
- private void elideEvents() {
-
- if (!s_eventElission) {
- return;
- }
-
- /*
- * Check for event elision: look for PIPELINE_UPSTREAM and
- * PIPELINE_CHANGE events and remove the earlier ones; check for
- * PIPELINE_ADD and PIPELINE_REMOVE pairings.
- */
- final Iterator<QuorumStateChangeEvent> events = queue.iterator();
- QuorumStateChangeEvent uce = null; // UPSTREAM CHANGE
- QuorumStateChangeEvent dce = null; // DOWNSTREAM CHANGE
- QuorumStateChangeEvent add = null; // PIPELINE_ADD
-
- while (events.hasNext()) {
- final QuorumStateChangeEvent tst = events.next();
- if (tst.getEventType() == QuorumStateChangeEventEnum.PIPELINE_UPSTREAM_CHANGE) {
- if (uce != null) {
- if (log.isDebugEnabled())
- log.debug("Elission removal of: " + uce);
- queue.remove(uce);
- }
- uce = tst;
- } else if (tst.getEventType() == QuorumStateChangeEventEnum.PIPELINE_CHANGE) {
- if (dce != null) {
- // replace 'from' of new state with 'from' of old
- tst.getDownstreamOldAndNew()[0] = dce
- .getDownstreamOldAndNew()[0];
-
- if (log.isDebugEnabled())
- log.debug("Elission removal of: " + dce);
- queue.remove(dce);
- }
- dce = tst;
- } else if (tst.getEventType() == QuorumStateChangeEventEnum.PIPELINE_ADD) {
- add = tst;
- } else if (tst.getEventType() == QuorumStateChangeEventEnum.PIPELINE_REMOVE) {
- if (add != null) {
- if (log.isDebugEnabled()) {
- log.debug("Elission removal of: " + add);
- log.debug("Elission removal of: " + tst);
- }
- queue.remove(add);
- queue.remove(tst);
- add = null;
- }
- if (dce != null) {
- if (log.isDebugEnabled())
- log.debug("Elission removal of: " + dce);
- queue.remove(dce);
- dce = null;
- }
- if (uce != null) {
- if (log.isDebugEnabled())
- log.debug("Elission removal of: " + uce);
- queue.remove(uce);
- uce = null;
- }
- }
-
- }
-
- } // elideEvents()
-
- /**
- * Dispatch any events in the {@link #queue}.
- */
- private void dispatchEvents() {
-
- elideEvents();
-
- QuorumStateChangeEvent e;...
[truncated message content] |
|
From: <tho...@us...> - 2013-10-11 15:05:25
|
Revision: 7448
http://bigdata.svn.sourceforge.net/bigdata/?rev=7448&view=rev
Author: thompsonbry
Date: 2013-10-11 15:05:17 +0000 (Fri, 11 Oct 2013)
Log Message:
-----------
Added a pre-condition test to sendHAStore() and sendHALog() that the service is the leader. The invariant is that the quorum remains met; therefore the invariant also verifies that the service remains the leader.
ZKQuorumImpl now generates a QUORUM_DISCONNECTED event. This can now be monitored by the invariant listener (TODO; a sketch follows the QuorumEventEnum diff below).
AbstractQuorum modified to expose some methods to ZKQuorumImpl for sendEvent().
AbstractQuorum.client made volatile, but getClient() and getClientAsMember() still use the lock (we will change that next; see the note after this log message).
- http://sourceforge.net/apps/trac/bigdata/ticket/718 (HAJournalServer needs to handle ZK client connection loss)
- http://sourceforge.net/apps/trac/bigdata/ticket/723 (HA asynchronous tasks must be canceled when invariants are changed)
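The local-copy idiom for the volatile field (visible in the getClient() hunk below) deserves a note: copying this.client into a final local means the null check and the return observe the same reference, so a concurrent terminate() cannot null the field in between. Distilled sketch (the lock is still held in this revision; the lock-free form is attempted in a later revision):

    private volatile C client;

    public C getClient() {
        final C client = this.client; // single volatile read
        if (client == null)
            throw new IllegalStateException();
        return client;
    }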
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/concurrent/FutureTaskInvariantMon.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumEventEnum.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/concurrent/FutureTaskInvariantMon.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/concurrent/FutureTaskInvariantMon.java 2013-10-11 13:50:11 UTC (rev 7447)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/concurrent/FutureTaskInvariantMon.java 2013-10-11 15:05:17 UTC (rev 7448)
@@ -64,6 +64,10 @@
private static final Logger log = Logger.getLogger(FutureTaskInvariantMon.class);
private final Quorum<HAGlue, QuorumService<HAGlue>> m_quorum;
+ /**
+ * The quorum token on entry.
+ */
+ private final long token;
private final List<QuorumEventInvariant> m_triggers = new CopyOnWriteArrayList<QuorumEventInvariant>();
@@ -77,6 +81,8 @@
m_quorum = quorum;
+ token = quorum.token();
+
}
public FutureTaskInvariantMon(final Runnable runnable, final T result,
@@ -89,6 +95,8 @@
m_quorum = quorum;
+ token = quorum.token();
+
}
/**
@@ -103,17 +111,25 @@
* Hook to manage listener registration and establish invariants.
*/
@Override
- public void run() {
- m_quorum.addListener(this);
- try {
- establishInvariants();
-
- super.run();
- } finally {
- m_quorum.removeListener(this);
- }
- }
+ public void run() {
+ boolean didStart = false;
+ m_quorum.addListener(this);
+ try {
+ establishInvariants();
+ didStart = true;
+ super.run();
+ } finally {
+ m_quorum.removeListener(this);
+ if (!didStart) {
+ /*
+ * Guarantee the future is cancelled unless super.run() was invoked.
+ */
+ cancel(true/* mayInterruptIfRunning */);
+ }
+ }
+ }
+
/**
* Establish an invariant that the specified service is a member of the
* quorum.
@@ -176,7 +192,8 @@
}
/**
- * Establish an invariant that the quorum is met.
+ * Establish an invariant that the quorum is met and remains met on the same
+ * token.
*/
public void assertQuorumMet() {
m_triggers.add(new QuorumEventInvariant(QuorumEventEnum.QUORUM_BROKE,
@@ -185,6 +202,8 @@
// now check that quorum is met and break if not
if (!m_quorum.isQuorumMet())
broken();
+ if (m_quorum.token() != token)
+ broken();
}
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-10-11 13:50:11 UTC (rev 7447)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-10-11 15:05:17 UTC (rev 7448)
@@ -292,10 +292,15 @@
/**
* The {@link QuorumClient}.
+ * <p>
+ * Note: This is volatile to allow visibility without holding the
+ * {@link #lock}. The field is only modified in {@link #start(QuorumClient)}
+ * and {@link #terminate()}, and those methods use the {@link #lock} to
+ * impose an appropriate ordering over events.
*
* @see #start(QuorumClient)
*/
- private C client;
+ private volatile C client;
/**
* An object which watches the distributed state of the quorum and informs
@@ -718,7 +723,8 @@
public C getClient() {
lock.lock();
try {
- if (this.client == null)
+ final C client = this.client;
+ if (client == null)
throw new IllegalStateException();
return client;
} finally {
@@ -729,7 +735,8 @@
public QuorumMember<S> getMember() {
lock.lock();
try {
- if (this.client == null)
+ final C client = this.client;
+ if (client == null)
throw new IllegalStateException();
if (client instanceof QuorumMember<?>) {
return (QuorumMember<S>) client;
@@ -754,6 +761,8 @@
*/
private QuorumMember<S> getClientAsMember() {
+ final C client = this.client;
+
if (client instanceof QuorumMember<?>) {
return (QuorumMember<S>) client;
@@ -3262,7 +3271,7 @@
* @param e
* The event.
*/
- private void sendEvent(final QuorumEvent e) {
+ protected void sendEvent(final QuorumEvent e) {
if (log.isTraceEnabled())
log.trace("" + e);
if (sendSynchronous) {
@@ -3423,7 +3432,7 @@
* @param t
* The throwable.
*/
- private void launderThrowable(final Throwable t) {
+ protected void launderThrowable(final Throwable t) {
if (InnerCause.isInnerCause(t, InterruptedException.class)) {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumEventEnum.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumEventEnum.java 2013-10-11 13:50:11 UTC (rev 7447)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumEventEnum.java 2013-10-11 15:05:17 UTC (rev 7448)
@@ -96,6 +96,11 @@
/**
* Event generated when a quorum breaks (aka when the token is cleared).
*/
- QUORUM_BROKE;
+ QUORUM_BROKE,
+ /**
+ * Event generated when a service becomes disconnected from a remote quorum
+ * (such as a zookeeper ensemble).
+ */
+ QUORUM_DISCONNECTED;
}
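For the QUORUM_DISCONNECTED TODO noted in the log message, one plausible shape, an assumption rather than part of this commit, would be a new trigger on FutureTaskInvariantMon:

    // Hypothetical; not in this commit. Trips for ANY service.
    public void assertZkConnected() {
        m_triggers.add(new QuorumEventInvariant(
                QuorumEventEnum.QUORUM_DISCONNECTED, null/* serviceId */));
    }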
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-10-11 13:50:11 UTC (rev 7447)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-10-11 15:05:17 UTC (rev 7448)
@@ -896,6 +896,9 @@
// The commit counter of the desired closing root block.
final long commitCounter = req.getCommitCounter();
+ // Note the token on entry.
+ final long token = getQuorum().token();
+
/*
* Open the HALog file. If it exists, then we will run a task to
* send it along the pipeline.
@@ -928,16 +931,25 @@
isLive = r.isLive();
// Task sends an HALog file along the pipeline.
- ft = new FutureTaskInvariantMon<Void>(new SendHALogTask(req, r), getQuorum()) {
+ ft = new FutureTaskInvariantMon<Void>(new SendHALogTask(
+ req, r), getQuorum()) {
- @Override
- protected void establishInvariants() {
- assertQuorumMet();
- assertJoined(getServiceId());
- assertMember(req.getServiceId());
- assertInPipeline(req.getServiceId());
- }
-
+ @Override
+ protected void establishInvariants() {
+ assertQuorumMet();
+ assertJoined(getServiceId());
+ assertMember(req.getServiceId());
+ assertInPipeline(req.getServiceId());
+ /*
+ * Note: This is a pre-condition, not an invariant.
+ * We verify on entry that this service is the
+ * leader. The invariant is that the quorum remains
+ * met on the current token, which is handled by
+ * assertQuorumMet().
+ */
+ getQuorum().assertLeader(token);
+ }
+
};
// Run task.
@@ -1126,18 +1138,28 @@
if (haLog.isDebugEnabled())
haLog.debug("req=" + req);
+ // Note the token on entry.
+ final long token = getQuorum().token();
+
// Task sends an HALog file along the pipeline.
final FutureTask<IHASendStoreResponse> ft = new FutureTaskInvariantMon<IHASendStoreResponse>(
new SendStoreTask(req), getQuorum()) {
- @Override
- protected void establishInvariants() {
- assertQuorumMet();
- assertJoined(getServiceId());
- assertMember(req.getServiceId());
- assertInPipeline(req.getServiceId());
- }
-
+ @Override
+ protected void establishInvariants() {
+ assertQuorumMet();
+ assertJoined(getServiceId());
+ assertMember(req.getServiceId());
+ assertInPipeline(req.getServiceId());
+ /*
+ * Note: This is a pre-condition, not an invariant. We
+ * verify on entry that this service is the leader. The
+ * invariant is that the quorum remains met on the current
+ * token, which is handled by assertQuorumMet().
+ */
+ getQuorum().assertLeader(token);
+ }
+
};
// Run task.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java 2013-10-11 13:50:11 UTC (rev 7447)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java 2013-10-11 15:05:17 UTC (rev 7448)
@@ -60,6 +60,7 @@
import com.bigdata.quorum.Quorum;
import com.bigdata.quorum.QuorumActor;
import com.bigdata.quorum.QuorumClient;
+import com.bigdata.quorum.QuorumEventEnum;
import com.bigdata.quorum.QuorumException;
import com.bigdata.quorum.QuorumMember;
import com.bigdata.quorum.QuorumWatcher;
@@ -1457,12 +1458,12 @@
if (client != null) {
try {
client.disconnected();
- } catch (RuntimeException ex) {
- log.error(ex);
- } catch (Exception ex) {
- log.error(ex);
+ } catch (Exception t) {
+ launderThrowable(t);
}
}
+ sendEvent(new E(QuorumEventEnum.QUORUM_DISCONNECTED,
+ lastValidToken(), token(), null/* serviceId */));
}
/**
|
|
From: <tho...@us...> - 2013-10-11 15:20:54
|
Revision: 7450
http://bigdata.svn.sourceforge.net/bigdata/?rev=7450&view=rev
Author: thompsonbry
Date: 2013-10-11 15:20:47 +0000 (Fri, 11 Oct 2013)
Log Message:
-----------
Removed the lock from AbstractQuorum.getClient() and getMember().
Added a sleep before retry in the ErrorTask when spinning until zk is reconnected for doServiceLeave().
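The retry loop now has this shape (a sketch; doServiceLeave() stands in for the guarded action, and the 250ms figure is the one added to HAJournalServer):

    private void leaveWithRetry() throws InterruptedException {
        while (true) {
            try {
                doServiceLeave(); // the guarded action (illustrative name)
                return;
            } catch (RuntimeException re) {
                if (InnerCause.isInnerCause(re, KeeperException.class)) {
                    Thread.sleep(250/* ms */); // give zk a chance to reconnect
                    continue;
                }
                throw re;
            }
        }
    }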
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-10-11 15:18:36 UTC (rev 7449)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-10-11 15:20:47 UTC (rev 7450)
@@ -436,6 +436,7 @@
* and {@link QuorumWatcher} are created, and asynchronous discovery is
* initialized for the {@link QuorumWatcher}.
*/
+ @Override
public void start(final C client) {
if (client == null)
throw new IllegalArgumentException();
@@ -505,6 +506,15 @@
}
+ /**
+ * Ensure that any guarded regions are interrupted.
+ */
+ public void interruptAll() {
+
+ threadGuard.interruptAll();
+
+ }
+
@Override
public void terminate() {
boolean interrupted = false;
@@ -513,7 +523,7 @@
/*
* Ensure that any guarded regions are interrupted.
*/
- threadGuard.interruptAll();
+ interruptAll();
if (client == null) {
// No client is attached.
return;
@@ -676,6 +686,7 @@
* inconsistent since the internal lock required for a consistent view is
* NOT acquired.
*/
+ @Override
public String toString() {
/*
* Note: This must run w/o the lock to avoid deadlocks so there may be
@@ -720,21 +731,23 @@
}
}
+ @Override
public C getClient() {
- lock.lock();
- try {
+// lock.lock();
+// try {
final C client = this.client;
if (client == null)
throw new IllegalStateException();
return client;
- } finally {
- lock.unlock();
- }
+// } finally {
+// lock.unlock();
+// }
}
+ @Override
public QuorumMember<S> getMember() {
- lock.lock();
- try {
+// lock.lock();
+// try {
final C client = this.client;
if (client == null)
throw new IllegalStateException();
@@ -742,9 +755,9 @@
return (QuorumMember<S>) client;
}
throw new UnsupportedOperationException();
- } finally {
- lock.unlock();
- }
+// } finally {
+// lock.unlock();
+// }
}
/**
@@ -785,6 +798,7 @@
* @throws IllegalStateException
* if the quorum is not running.
*/
+ @Override
public QuorumActor<S, C> getActor() {
lock.lock();
try {
@@ -814,6 +828,7 @@
}
}
+ @Override
final public void addListener(final QuorumListener listener) {
if (listener == null)
throw new IllegalArgumentException();
@@ -822,6 +837,7 @@
listeners.add(listener);
}
+ @Override
final public void removeListener(final QuorumListener listener) {
if (listener == null)
throw new IllegalArgumentException();
@@ -830,6 +846,7 @@
listeners.remove(listener);
}
+ @Override
final public int replicationFactor() {
// Note: [k] is final.
@@ -837,18 +854,21 @@
}
+ @Override
public final boolean isQuorum(final int njoined) {
return njoined >= kmeet;
}
+ @Override
final public boolean isHighlyAvailable() {
return replicationFactor() > 1;
}
+ @Override
final public long lastValidToken() {
lock.lock();
try {
@@ -858,6 +878,7 @@
}
}
+ @Override
final public UUID[] getMembers() {
lock.lock();
try {
@@ -867,6 +888,7 @@
}
}
+ @Override
final public Map<Long, UUID[]> getVotes() {
lock.lock();
try {
@@ -889,6 +911,7 @@
}
}
+ @Override
final public Long getCastVote(final UUID serviceId) {
lock.lock();
try {
@@ -908,6 +931,7 @@
}
}
+ @Override
public Long getCastVoteIfConsensus(final UUID serviceId) {
lock.lock();
try {
@@ -971,6 +995,7 @@
return -1;
}
+ @Override
final public UUID[] getJoined() {
lock.lock();
try {
@@ -980,6 +1005,7 @@
}
}
+ @Override
final public UUID[] getPipeline() {
lock.lock();
try {
@@ -989,6 +1015,7 @@
}
}
+ @Override
final public UUID getLastInPipeline() {
lock.lock();
try {
@@ -1003,6 +1030,7 @@
}
}
+ @Override
final public UUID[] getPipelinePriorAndNext(final UUID serviceId) {
if (serviceId == null)
throw new IllegalArgumentException();
@@ -1030,6 +1058,7 @@
}
}
+ @Override
final public UUID getLeaderId() {
UUID leaderId = null;
final long tmp;
@@ -1060,6 +1089,7 @@
return leaderId;
}
+ @Override
final public long token() {
// Note: volatile read.
return token;
@@ -1079,6 +1109,7 @@
}
+ @Override
final public void assertLeader(final long token) {
if (token == NO_QUORUM) {
// The quorum was not met when the client obtained that token.
@@ -1102,12 +1133,14 @@
assertQuorum(token);
}
+ @Override
final public boolean isQuorumMet() {
return token != NO_QUORUM;
}
+ @Override
final public boolean isQuorumFullyMet(final long token) {
lock.lock();
@@ -1140,6 +1173,7 @@
* This watches the current token and will return as soon as the token is
* valid.
*/
+ @Override
final public long awaitQuorum() throws InterruptedException,
AsynchronousQuorumCloseException {
lock.lock();
@@ -1155,6 +1189,7 @@
}
}
+ @Override
final public long awaitQuorum(final long timeout, final TimeUnit units)
throws InterruptedException, TimeoutException,
AsynchronousQuorumCloseException {
@@ -1181,6 +1216,7 @@
}
}
+ @Override
final public void awaitBreak() throws InterruptedException,
AsynchronousQuorumCloseException {
lock.lock();
@@ -1196,6 +1232,7 @@
}
}
+ @Override
final public void awaitBreak(final long timeout, final TimeUnit units)
throws InterruptedException, TimeoutException,
AsynchronousQuorumCloseException {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-10-11 15:18:36 UTC (rev 7449)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-10-11 15:20:47 UTC (rev 7450)
@@ -1919,6 +1919,7 @@
} catch (RuntimeException re) {
if (InnerCause.isInnerCause(re,
KeeperException.class)) {
+ Thread.sleep(250/* ms */);
// Retry.
continue;
}
|
|
From: <tho...@us...> - 2013-10-11 15:49:17
|
Revision: 7452
http://bigdata.svn.sourceforge.net/bigdata/?rev=7452&view=rev
Author: thompsonbry
Date: 2013-10-11 15:49:09 +0000 (Fri, 11 Oct 2013)
Log Message:
-----------
Rolling back the change to AbstractQuorum.getClient() and getMember(). This is aggravating a problem where concurrent actions on the quorum can cause an awaited Condition to never become satisfied.
See #718 (HAJournalServer needs to handle ZK client connection loss)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-10-11 15:21:41 UTC (rev 7451)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-10-11 15:49:09 UTC (rev 7452)
@@ -733,21 +733,21 @@
@Override
public C getClient() {
-// lock.lock();
-// try {
+ lock.lock();
+ try {
final C client = this.client;
if (client == null)
throw new IllegalStateException();
return client;
-// } finally {
-// lock.unlock();
-// }
+ } finally {
+ lock.unlock();
+ }
}
@Override
public QuorumMember<S> getMember() {
-// lock.lock();
-// try {
+ lock.lock();
+ try {
final C client = this.client;
if (client == null)
throw new IllegalStateException();
@@ -755,9 +755,9 @@
return (QuorumMember<S>) client;
}
throw new UnsupportedOperationException();
-// } finally {
-// lock.unlock();
-// }
+ } finally {
+ lock.unlock();
+ }
}
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-10-11 15:21:41 UTC (rev 7451)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-10-11 15:49:09 UTC (rev 7452)
@@ -93,6 +93,7 @@
import com.bigdata.journal.IRootBlockView;
import com.bigdata.journal.RootBlockUtility;
import com.bigdata.journal.WORMStrategy;
+import com.bigdata.quorum.AbstractQuorum;
import com.bigdata.quorum.Quorum;
import com.bigdata.quorum.QuorumEvent;
import com.bigdata.quorum.QuorumException;
@@ -1859,7 +1860,7 @@
while (true) {
log.warn("Will do error handler.");
-
+// ((AbstractQuorum<HAGlue, QuorumService<HAGlue>>) getQuorum()).interruptAll();
/*
* Discard the current write set.
*
|
|
From: <tho...@us...> - 2013-10-11 18:44:14
|
Revision: 7453
http://bigdata.svn.sourceforge.net/bigdata/?rev=7453&view=rev
Author: thompsonbry
Date: 2013-10-11 18:44:06 +0000 (Fri, 11 Oct 2013)
Log Message:
-----------
Bug fix for #736 (MIN/MAX should use ORDER BY semantics).
Workaround for #740 (NSPIN) so we can inspect this further.
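To see why #736 matters: SPARQL "<" (CompareBOp) raises a type error when comparing unlike values, while the ORDER BY ordering is total. Since IVComparator is modeled on openrdf's ValueComparator, a plain-Sesame sketch illustrates the difference (the literal values are illustrative):

    import org.openrdf.model.Literal;
    import org.openrdf.model.impl.ValueFactoryImpl;
    import org.openrdf.query.algebra.evaluation.util.ValueComparator;

    public class OrderBySemanticsSketch {
        public static void main(final String[] args) {
            final ValueFactoryImpl vf = ValueFactoryImpl.getInstance();
            final Literal nine = vf.createLiteral(9);        // "9"^^xsd:int
            final Literal plain = vf.createLiteral("auth2"); // plain literal
            // Under "<" semantics this pair is a SPARQL type error, so a
            // MIN/MAX built on CompareBOp fails; ORDER BY still imposes a
            // total order over the two, so MIN/MAX can pick a winner.
            System.out.println(new ValueComparator().compare(nine, plain));
        }
    }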
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/IVComparator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/MAX.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/MIN.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/TestMAX.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/TestMIN.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/IVComparator.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/IVComparator.java 2013-10-11 15:49:09 UTC (rev 7452)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/IVComparator.java 2013-10-11 18:44:06 UTC (rev 7453)
@@ -51,7 +51,7 @@
/**
* A comparator that compares {@link IV}s according the SPARQL value ordering as
- * specified in <A
+ * specified in <a
* href="http://www.w3.org/TR/rdf-sparql-query/#modOrderBy">SPARQL Query
* Language for RDF</a>. This implementation is based on the openrdf
* {@link ValueComparator} but has been modified to work with {@link IV}s.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2013-10-11 15:49:09 UTC (rev 7452)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2013-10-11 18:44:06 UTC (rev 7453)
@@ -115,19 +115,28 @@
/**
* The #of times that we will use {@link BlockingQueue#offer(Object)} or
- * {@link Queue#poll()} before converting to the variants of those methods
- * which accept a timeout. The timeouts are used to reduce the contention
- * for the queue if either the producer or the consumer is lagging.
+ before converting to the variant of that method which accepts a timeout.
+ The timeouts are used to reduce the contention for the queue if
+ the consumer is lagging.
*/
- private static final int NSPIN = 100;
-
+ private static final int NSPIN_ADD = Integer.valueOf(System.getProperty(
+ BlockingBuffer.class.getName() + ".NSPIN.ADD", "100"));
+
/**
+ * The #of times that we will use {@link Queue#poll()} before converting to
+ * the variant of that method which accepts a timeout. The timeouts are used
+ * to reduce the contention for the queue if the producer is lagging.
+ */
+ private static final int NSPIN_READ = Integer.valueOf(System.getProperty(
+ BlockingBuffer.class.getName() + ".NSPIN.READ", "100"));
+
+ /**
* The timeout for offer() or poll() as a function of the #of tries that
* have already been made to {@link #add(Object)} or read a chunk.
*
* @param ntries
* The #of tries.
- *
+ *
* @return The timeout (milliseconds).
*/
private static final long getTimeoutMillis(final int ntries) {
@@ -1038,7 +1047,7 @@
final boolean added;
- if (ntries < NSPIN) {
+ if (ntries < NSPIN_ADD) {
// offer (non-blocking).
added = queue.offer(e);
@@ -1804,7 +1813,7 @@
* inside of poll(timeout,unit) [@todo This could be fixed by a poison pill.]
*/
- if (ntries < NSPIN) {
+ if (ntries < NSPIN_READ) {
/*
* This is basically a spin lock (it can spin without
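Usage note for the NSPIN workaround (#740): since the values above are read in static initializers, they must be supplied before the class loads, i.e., as JVM system properties such as -Dcom.bigdata.relation.accesspath.BlockingBuffer.NSPIN.ADD=50 and -Dcom.bigdata.relation.accesspath.BlockingBuffer.NSPIN.READ=200. Both default to 100, the previous hard-coded NSPIN value.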
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/MAX.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/MAX.java 2013-10-11 15:49:09 UTC (rev 7452)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/MAX.java 2013-10-11 18:44:06 UTC (rev 7453)
@@ -25,13 +25,11 @@
import java.util.Map;
-import org.openrdf.query.algebra.Compare.CompareOp;
-import org.openrdf.query.algebra.evaluation.util.ValueComparator;
-
import com.bigdata.bop.BOp;
import com.bigdata.bop.IBindingSet;
import com.bigdata.bop.IValueExpression;
import com.bigdata.bop.aggregate.AggregateBase;
+import com.bigdata.bop.solutions.IVComparator;
import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.internal.constraints.CompareBOp;
import com.bigdata.rdf.internal.constraints.INeedsMaterialization;
@@ -43,7 +41,7 @@
* <p>
* Note: MIN (and MAX) are defined in terms of the ORDER_BY semantics for
* SPARQL. Therefore, this must handle comparisons when the value is not an IV,
- * e.g., using {@link ValueComparator}.
+ * e.g., using {@link IVComparator}.
*
* @author thompsonbry
*
@@ -58,15 +56,23 @@
*/
private static final long serialVersionUID = 1L;
- public MAX(MAX op) {
+ /**
+ * Provides SPARQL <em>ORDER BY</em> semantics.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/736">
+ * MIN() malfunction </a>
+ */
+ private static final transient IVComparator comparator = new IVComparator();
+
+ public MAX(final MAX op) {
super(op);
}
- public MAX(BOp[] args, Map<String, Object> annotations) {
+ public MAX(final BOp[] args, final Map<String, Object> annotations) {
super(args, annotations);
}
- public MAX(boolean distinct, IValueExpression...expr) {
+ public MAX(final boolean distinct, final IValueExpression...expr) {
super(distinct, expr);
}
@@ -103,46 +109,44 @@
}
private IV doGet(final IBindingSet bindingSet) {
+
for (int i = 0; i < arity(); i++) {
final IValueExpression<IV<?, ?>> expr = (IValueExpression<IV<?, ?>>) get(i);
- final IV iv = expr.get(bindingSet);
+ final IV iv = expr.get(bindingSet);
- if (iv != null) {
+ if (iv != null) {
- /*
- * Aggregate non-null values.
- */
+ /*
+ * Aggregate non-null values.
+ */
- if (max == null) {
+ if (max == null) {
- max = iv;
+ max = iv;
- } else {
+ } else {
- /**
- * FIXME This needs to use the ordering define by ORDER_BY. The
- * CompareBOp imposes the ordering defined for the "<" operator
- * which is less robust and will throw a type exception if you
- * attempt to compare unlike Values.
- *
- * @see https://sourceforge.net/apps/trac/bigdata/ticket/300#comment:5
- */
- if (CompareBOp.compare(iv, max, CompareOp.GT)) {
+ // Note: This is SPARQL GT semantics, not ORDER BY.
+// if (CompareBOp.compare(iv, max, CompareOp.GT)) {
- max = iv;
+ // SPARQL ORDER_BY semantics
+ if (comparator.compare(iv, max) > 0) {
+ max = iv;
+
+ }
+
}
}
-
}
- }
return max;
}
+ @Override
synchronized public void reset() {
max = null;
@@ -172,6 +176,7 @@
*
* FIXME MikeP: What is the right return value here?
*/
+ @Override
public Requirement getRequirement() {
return INeedsMaterialization.Requirement.ALWAYS;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/MIN.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/MIN.java 2013-10-11 15:49:09 UTC (rev 7452)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/MIN.java 2013-10-11 18:44:06 UTC (rev 7453)
@@ -25,20 +25,14 @@
import java.util.Map;
-import org.apache.log4j.Logger;
-import org.openrdf.query.algebra.Compare.CompareOp;
-import org.openrdf.query.algebra.evaluation.util.ValueComparator;
-
import com.bigdata.bop.BOp;
-import com.bigdata.bop.BOpBase;
import com.bigdata.bop.IBindingSet;
import com.bigdata.bop.IValueExpression;
import com.bigdata.bop.aggregate.AggregateBase;
-import com.bigdata.bop.aggregate.IAggregate;
+import com.bigdata.bop.solutions.IVComparator;
import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.internal.constraints.CompareBOp;
import com.bigdata.rdf.internal.constraints.INeedsMaterialization;
-import com.bigdata.rdf.internal.constraints.INeedsMaterialization.Requirement;
/**
* Operator reports the minimum observed value over the presented binding sets
@@ -47,11 +41,11 @@
* <p>
* Note: MIN (and MAX) are defined in terms of the ORDER_BY semantics for
* SPARQL. Therefore, this must handle comparisons when the value is not an IV,
- * e.g., using {@link ValueComparator}.
- *
+ * e.g., using the {@link IVComparator}.
+ *
* @author thompsonbry
- *
- * TODO What is reported if there are no non-null observations?
+ *
+ * TODO What is reported if there are no non-null observations?
*/
public class MIN extends AggregateBase<IV> implements INeedsMaterialization{
@@ -62,15 +56,23 @@
*/
private static final long serialVersionUID = 1L;
- public MIN(MIN op) {
+ /**
+ * Provides SPARQL <em>ORDER BY</em> semantics.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/736">
+ * MIN() malfunction </a>
+ */
+ private static final transient IVComparator comparator = new IVComparator();
+
+ public MIN(final MIN op) {
super(op);
}
- public MIN(BOp[] args, Map<String, Object> annotations) {
+ public MIN(final BOp[] args, final Map<String, Object> annotations) {
super(args, annotations);
}
- public MIN(boolean distinct, IValueExpression...expr) {
+ public MIN(final boolean distinct, final IValueExpression...expr) {
super(distinct, expr);
}
@@ -107,46 +109,44 @@
}
private IV doGet(final IBindingSet bindingSet) {
- for(int i=0;i<arity();i++){
+ for (int i = 0; i < arity(); i++) {
+
final IValueExpression<IV> expr = (IValueExpression<IV>) get(i);
- final IV iv = expr.get(bindingSet);
+ final IV iv = expr.get(bindingSet);
- if (iv != null) {
+ if (iv != null) {
- /*
- * Aggregate non-null values.
- */
+ /*
+ * Aggregate non-null values.
+ */
- if (min == null) {
+ if (min == null) {
- min = iv;
+ min = iv;
- } else {
+ } else {
- /**
- * FIXME This needs to use the ordering define by ORDER_BY. The
- * CompareBOp imposes the ordering defined for the "<" operator
- * which is less robust and will throw a type exception if you
- * attempt to compare unlike Values.
- *
- * @see https://sourceforge.net/apps/trac/bigdata/ticket/300#comment:5
- */
- if (CompareBOp.compare(iv, min, CompareOp.LT)) {
+ // Note: This is SPARQL LT semantics, not ORDER BY.
+// if (CompareBOp.compare(iv, min, CompareOp.LT)) {
- min = iv;
+ // SPARQL ORDER_BY semantics
+ if (comparator.compare(iv, min) < 0) {
+ min = iv;
+
+ }
+
}
}
-
}
- }
return min;
}
+ @Override
synchronized public void reset() {
min = null;
@@ -176,6 +176,7 @@
*
* FIXME MikeP: What is the right return value here?
*/
+ @Override
public Requirement getRequirement() {
return INeedsMaterialization.Requirement.ALWAYS;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2013-10-11 15:49:09 UTC (rev 7452)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2013-10-11 18:44:06 UTC (rev 7453)
@@ -36,14 +36,21 @@
import com.bigdata.bop.IBindingSet;
import com.bigdata.bop.IValueExpression;
import com.bigdata.bop.NV;
+import com.bigdata.bop.solutions.IVComparator;
import com.bigdata.rdf.error.SparqlTypeErrorException;
import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.internal.impl.literal.LiteralExtensionIV;
import com.bigdata.rdf.model.BigdataValue;
-import com.bigdata.rdf.sparql.ast.GlobalAnnotations;
/**
- * Perform open-world value comparison operations per the SPARQL spec.
+ * Perform open-world value comparison operations per the SPARQL spec (the LT
+ * operator). This does NOT implement the broader ordering for ORDER BY. That is
+ * handled by {@link IVComparator}.
+ *
+ * @see <a
+ * href="http://www.w3.org/TR/2013/REC-sparql11-query-20130321/#op_lt"><</a>
+ *
+ * @see IVComparator
*/
public class CompareBOp extends XSDBooleanIVValueExpression
implements INeedsMaterialization {
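
A toy contrast between the two semantics (neither method is bigdata code: lt() mimics the open-world "<" operator, which must signal a type error for unlike operands the way CompareBOp raises SparqlTypeErrorException, while orderBy() mimics a total ordering in the style of IVComparator):
{{{
// Illustration only: SPARQL "<" is partial and fails on unlike values;
// the ORDER BY ordering is total and never fails.
final class CompareSemanticsSketch {

    static boolean lt(final Object a, final Object b) {
        if (a instanceof Integer && b instanceof Integer)
            return (Integer) a < (Integer) b;
        // Unlike operands: "<" has no defined answer in SPARQL.
        throw new IllegalStateException("SPARQL type error");
    }

    static int orderBy(final Object a, final Object b) {
        // A total order: group by type first, then by lexical form.
        final int byType = a.getClass().getName()
                .compareTo(b.getClass().getName());
        return byType != 0 ? byType : a.toString().compareTo(b.toString());
    }

}
}}}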
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/TestMAX.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/TestMAX.java 2013-10-11 15:49:09 UTC (rev 7452)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/TestMAX.java 2013-10-11 18:44:06 UTC (rev 7453)
@@ -227,7 +227,19 @@
}
- public void test_max_with_errors() {
+ /**
+ * MAX is defined in terms of SPARQL <code>ORDER BY</code> rather than
+ * <code>GT</code>.
+ *
+ * @see <a href="http://www.w3.org/TR/rdf-sparql-query/#modOrderBy">SPARQL
+ * Query Language for RDF</a>
+ * @see <a
+ *         href="http://www.w3.org/TR/2013/REC-sparql11-query-20130321/#op_lt">&lt;</a>
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/736">
+ * MIN() malfunction </a>
+ */
+ public void test_max_uses_ORDER_BY_not_GT() {
final BigdataValueFactory f = BigdataValueFactoryImpl.getInstance(getName());
@@ -261,7 +273,7 @@
* ?org ?auth ?book ?lprice
* org1 auth1 book1 9
* org1 auth1 book3 5
- * org1 auth2 book3 7
+ * org1 auth2 book3 auth2
* org2 auth3 book4 7
* </pre>
*/
@@ -285,44 +297,50 @@
assertFalse(op.isDistinct());
assertFalse(op.isWildcard());
- try {
- op.reset();
- for (IBindingSet bs : data) {
- op.get(bs);
- }
- fail("Expecting: " + SparqlTypeErrorException.class);
- } catch (RuntimeException ex) {
- if (InnerCause.isInnerCause(ex, SparqlTypeErrorException.class)) {
- if (log.isInfoEnabled()) {
- log.info("Ignoring expected exception: " + ex);
- }
- } else {
- fail("Expecting: " + SparqlTypeErrorException.class, ex);
- }
+ op.reset();
+ for (IBindingSet bs : data) {
+ op.get(bs);
}
+ assertEquals(price9.get(), op.done());
+
+// try {
+// op.reset();
+// for (IBindingSet bs : data) {
+// op.get(bs);
+// }
+// fail("Expecting: " + SparqlTypeErrorException.class);
+// } catch (RuntimeException ex) {
+// if (InnerCause.isInnerCause(ex, SparqlTypeErrorException.class)) {
+// if (log.isInfoEnabled()) {
+// log.info("Ignoring expected exception: " + ex);
+// }
+// } else {
+// fail("Expecting: " + SparqlTypeErrorException.class, ex);
+// }
+// }
+//
+// /*
+// * Now verify that the error is sticky.
+// */
+// try {
+// op.done();
+// fail("Expecting: " + SparqlTypeErrorException.class);
+// } catch (RuntimeException ex) {
+// if (InnerCause.isInnerCause(ex, SparqlTypeErrorException.class)) {
+// if (log.isInfoEnabled()) {
+// log.info("Ignoring expected exception: " + ex);
+// }
+// } else {
+// fail("Expecting: " + SparqlTypeErrorException.class, ex);
+// }
+// }
+//
+// /*
+// * Now verify that reset() clears the error.
+// */
+// op.reset();
+// op.done();
- /*
- * Now verify that the error is sticky.
- */
- try {
- op.done();
- fail("Expecting: " + SparqlTypeErrorException.class);
- } catch (RuntimeException ex) {
- if (InnerCause.isInnerCause(ex, SparqlTypeErrorException.class)) {
- if (log.isInfoEnabled()) {
- log.info("Ignoring expected exception: " + ex);
- }
- } else {
- fail("Expecting: " + SparqlTypeErrorException.class, ex);
- }
- }
-
- /*
- * Now verify that reset() clears the error.
- */
- op.reset();
- op.done();
-
}
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/TestMIN.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/TestMIN.java 2013-10-11 15:49:09 UTC (rev 7452)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/TestMIN.java 2013-10-11 18:44:06 UTC (rev 7453)
@@ -34,7 +34,6 @@
import com.bigdata.bop.Var;
import com.bigdata.bop.bindingSet.ListBindingSet;
import com.bigdata.journal.ITx;
-import com.bigdata.rdf.error.SparqlTypeErrorException;
import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.internal.VTE;
import com.bigdata.rdf.internal.XSD;
@@ -47,7 +46,6 @@
import com.bigdata.rdf.model.BigdataValueFactory;
import com.bigdata.rdf.model.BigdataValueFactoryImpl;
import com.bigdata.rdf.sparql.ast.GlobalAnnotations;
-import com.bigdata.util.InnerCause;
/**
* Unit tests for {@link MIN}.
@@ -227,7 +225,19 @@
}
- public void test_min_with_errors() {
+ /**
+ * MIN is defined in terms of SPARQL <code>ORDER BY</code> rather than
+ * <code>LT</code>.
+ *
+ * @see <a href="http://www.w3.org/TR/rdf-sparql-query/#modOrderBy">SPARQL
+ * Query Language for RDF</a>
+ * @see <a
+ *         href="http://www.w3.org/TR/2013/REC-sparql11-query-20130321/#op_lt">&lt;</a>
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/736">
+ * MIN() malfunction </a>
+ */
+ public void test_min_uses_ORDER_BY_not_LT() {
final BigdataValueFactory f = BigdataValueFactoryImpl.getInstance(getName());
@@ -261,7 +271,7 @@
* ?org ?auth ?book ?lprice
* org1 auth1 book1 9
* org1 auth1 book3 5
- * org1 auth2 book3 7
+ * org1 auth2 book3 auth2
* org2 auth3 book4 7
* </pre>
*/
@@ -285,44 +295,51 @@
assertFalse(op.isDistinct());
assertFalse(op.isWildcard());
- try {
- op.reset();
- for (IBindingSet bs : data) {
- op.get(bs);
- }
- fail("Expecting: " + SparqlTypeErrorException.class);
- } catch (RuntimeException ex) {
- if (InnerCause.isInnerCause(ex, SparqlTypeErrorException.class)) {
- if (log.isInfoEnabled()) {
- log.info("Ignoring expected exception: " + ex);
- }
- } else {
- fail("Expecting: " + SparqlTypeErrorException.class, ex);
- }
+ op.reset();
+ for (IBindingSet bs : data) {
+ op.get(bs);
}
+
+ assertEquals(auth2.get(), op.done());
- /*
- * Now verify that the error is sticky.
- */
- try {
- op.done();
- fail("Expecting: " + SparqlTypeErrorException.class);
- } catch (RuntimeException ex) {
- if (InnerCause.isInnerCause(ex, SparqlTypeErrorException.class)) {
- if (log.isInfoEnabled()) {
- log.info("Ignoring expected exception: " + ex);
- }
- } else {
- fail("Expecting: " + SparqlTypeErrorException.class, ex);
- }
- }
+// try {
+// op.reset();
+// for (IBindingSet bs : data) {
+// op.get(bs);
+// }
+// fail("Expecting: " + SparqlTypeErrorException.class);
+// } catch (RuntimeException ex) {
+// if (InnerCause.isInnerCause(ex, SparqlTypeErrorException.class)) {
+// if (log.isInfoEnabled()) {
+// log.info("Ignoring expected exception: " + ex);
+// }
+// } else {
+// fail("Expecting: " + SparqlTypeErrorException.class, ex);
+// }
+// }
+//
+// /*
+// * Now verify that the error is sticky.
+// */
+// try {
+// op.done();
+// fail("Expecting: " + SparqlTypeErrorException.class);
+// } catch (RuntimeException ex) {
+// if (InnerCause.isInnerCause(ex, SparqlTypeErrorException.class)) {
+// if (log.isInfoEnabled()) {
+// log.info("Ignoring expected exception: " + ex);
+// }
+// } else {
+// fail("Expecting: " + SparqlTypeErrorException.class, ex);
+// }
+// }
+//
+// /*
+// * Now verify that reset() clears the error.
+// */
+// op.reset();
+// op.done();
- /*
- * Now verify that reset() clears the error.
- */
- op.reset();
- op.done();
-
}
}
|
|
From: <tho...@us...> - 2013-10-19 20:02:02
|
Revision: 7461
http://bigdata.svn.sourceforge.net/bigdata/?rev=7461&view=rev
Author: thompsonbry
Date: 2013-10-19 20:01:51 +0000 (Sat, 19 Oct 2013)
Log Message:
-----------
Added CC and PR implementations and a reducer to report the histogram over the #of vertices at each depth for BFS. See #629 (Graph Processing API)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASOptions.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASProgram.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGraphAccessor.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IReducer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/BaseGASProgram.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASState.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/GASRunnerBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/VertexDistribution.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/AbstractGraphTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/analytics/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/analytics/TestBFS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/analytics/TestSSSP.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/impl/ram/TestGather.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/impl/sail/AbstractSailGraphTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/impl/sail/TestGather.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASState.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestBFS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestGather.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestSSSP.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/FrontierEnum.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/CC.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/analytics/TestCC.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/analytics/TestPR.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/data/ssspGraph.png
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/data/ssspGraph.ttl
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/FrontierEnum.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/FrontierEnum.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/FrontierEnum.java 2013-10-19 20:01:51 UTC (rev 7461)
@@ -0,0 +1,36 @@
+/**
+ Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+package com.bigdata.rdf.graph;
+
+/**
+ * Type-safe enumeration characterizing the assumptions of an algorithm
+ * concerning its initial frontier.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public enum FrontierEnum {
+
+ /**
+ * The initial frontier is a single vertex.
+ */
+ SingleVertex,
+
+ /**
+ * The initial frontier is all vertices.
+ */
+ AllVertices;
+
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASContext.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASContext.java 2013-10-18 15:34:43 UTC (rev 7460)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASContext.java 2013-10-19 20:01:51 UTC (rev 7461)
@@ -50,6 +50,11 @@
IGASState<VS, ES, ST> getGASState();
/**
+ * The graph access object.
+ */
+ IGraphAccessor getGraphAccessor();
+
+ /**
* Execute one iteration.
*
* @param stats
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASOptions.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASOptions.java 2013-10-18 15:34:43 UTC (rev 7460)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASOptions.java 2013-10-19 20:01:51 UTC (rev 7461)
@@ -39,8 +39,28 @@
public interface IGASOptions<VS, ES, ST> {
/**
+ * Return the nature of the initial frontier for this algorithm.
+ */
+ FrontierEnum getInitialFrontierEnum();
+
+ /**
+ * Return the type of edges that must exist when sampling the vertices of
+ * the graph. If {@link EdgesEnum#InEdges} is specified, then each sampled
+ * vertex will have at least one in-edge. If {@link EdgesEnum#OutEdges} is
+ * specified, then each sampled vertex will have at least one out-edge. To
+ * sample all vertices regardless of their edges, specify
+ * {@link EdgesEnum#NoEdges}. To require that each vertex has at least one
+ * in-edge and one out-edge, specify {@link EdgesEnum#AllEdges}.
+ */
+ EdgesEnum getSampleEdgesFilter();
+
+ /**
* Return the set of edges to which the GATHER is applied -or-
* {@link EdgesEnum#NoEdges} to skip the GATHER phase.
+ *
+ * TODO We may need to set this dynamically when visiting the vertex in
+ * the frontier rather than having it be a one-time property of the
+ * vertex program.
*/
EdgesEnum getGatherEdges();
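
Taken together with the getEdgeCount() method added to IGraphAccessor in this revision, a sampler can honor this filter with a simple per-vertex test. The helper below is hypothetical (it is not part of the commit); only the EdgesEnum semantics described in the javadoc above are taken from the source:
{{{
import org.openrdf.model.Value;

import com.bigdata.rdf.graph.EdgesEnum;
import com.bigdata.rdf.graph.IGASContext;
import com.bigdata.rdf.graph.IGraphAccessor;

// Hypothetical helper: accept a sampled vertex iff it satisfies the
// program's getSampleEdgesFilter().
final class SampleFilterSketch {

    static boolean accept(final IGraphAccessor graph,
            final IGASContext<?, ?, ?> ctx, final Value v,
            final EdgesEnum filter) {
        switch (filter) {
        case NoEdges:
            return true; // sample all vertices regardless of their edges.
        case AllEdges:
            return graph.getEdgeCount(ctx, v, EdgesEnum.InEdges) > 0
                    && graph.getEdgeCount(ctx, v, EdgesEnum.OutEdges) > 0;
        default: // InEdges or OutEdges.
            return graph.getEdgeCount(ctx, v, filter) > 0;
        }
    }

}
}}}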
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASProgram.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASProgram.java 2013-10-18 15:34:43 UTC (rev 7460)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASProgram.java 2013-10-19 20:01:51 UTC (rev 7461)
@@ -32,21 +32,46 @@
* the generic type for the per-edge state, but that is not always
* true. The SUM type is scoped to the GATHER + SUM operation (NOT
* the computation).
- *
+ *
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ *
+ * TODO DESIGN: The broad problem with this approach is that it is
+ * overly coupled with the Java object model. Instead it needs to expose
+ * an API that is aimed at vectored (for GPU) execution with 2D
+ * partitioning (for out-of-core, multi-node).
*/
public interface IGASProgram<VS, ES, ST> extends IGASOptions<VS, ES, ST> {
/**
+ * One time initialization before the {@link IGASProgram} is executed.
+ *
+ * @param ctx
+ * The evaluation context.
+ */
+ void before(IGASContext<VS, ES, ST> ctx);
+
+ /**
+ * One time post-processing hook invoked after the {@link IGASProgram} is executed.
+ *
+ * @param ctx
+ * The evaluation context.
+ */
+ void after(IGASContext<VS, ES, ST> ctx);
+
+ /**
* Callback to initialize the state for each vertex in the initial frontier
* before the first iteration. A typical use case is to set the distance of
* the starting vertex to ZERO (0).
*
* @param u
* The vertex.
+ *
+ * TODO We do not need both the {@link IGASContext} and the
+ * {@link IGASState}. The latter is available from the former.
*/
- void init(IGASState<VS, ES, ST> state, Value u);
-
+ void initVertex(IGASContext<VS, ES, ST> ctx, IGASState<VS, ES, ST> state,
+ Value u);
+
/**
* GATHER is a map/reduce over the edges of the vertex. The SUM provides
* pair-wise reduction over the edges visited by the GATHER.
@@ -94,8 +119,14 @@
* TODO DESIGN: Rather than pair-wise reduction, why not use
* vectored reduction? That way we could use an array of primitives
* as well as objects.
+ *
+ * TODO DESIGN: This should be a reduced interface since we only
+ * need access to the comparator semantics while the [state]
+ * provides random access to vertex and edge state. The comparator
+ * is necessary for MIN semantics for the {@link Value}
+ * implementation of the backend. E.g., Value versus IV.
*/
- ST sum(ST left, ST right);
+ ST sum(final IGASState<VS, ES, ST> state, ST left, ST right);
/**
* Apply the reduced aggregation computed by GATHER + SUM to the vertex.
@@ -155,7 +186,7 @@
* Return <code>true</code> iff the algorithm should continue. This is
* invoked after every iteration, once the new frontier has been computed
* and {@link IGASState#round()} has been advanced. An implementation may
- * simple return <code>true</code>, in which case the algorithm will
+ * simply return <code>true</code>, in which case the algorithm will
* continue IFF the current frontier is not empty.
* <p>
* Note: While this can be used to make custom decisions concerning the
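
The revised lifecycle, in order: before(), frontier initialization, the gather/apply/scatter rounds, then after(). Below is a simplified driver loop showing where the new hooks and the revised sum() signature fit; a real GAS engine does considerably more (scheduling, reduction, termination checks), and only the method names and their ordering are taken from the interfaces in this commit:
{{{
import org.openrdf.model.Value;

import com.bigdata.rdf.graph.IGASContext;
import com.bigdata.rdf.graph.IGASProgram;
import com.bigdata.rdf.graph.IGASState;

// Simplified sketch of program evaluation. Not the actual GASContext.
final class LifecycleSketch {

    static <VS, ES, ST> void run(final IGASProgram<VS, ES, ST> program,
            final IGASContext<VS, ES, ST> ctx, final Value... seeds) {

        final IGASState<VS, ES, ST> state = ctx.getGASState();

        program.before(ctx); // one-time setup.

        state.setFrontier(ctx, seeds); // populate the initial frontier.

        for (Value u : seeds) // per-vertex init (the real engine may
            program.initVertex(ctx, state, u); // do this in setFrontier).

        // ... GATHER / SUM / APPLY / SCATTER rounds run here, with
        // pair-wise reduction via program.sum(state, left, right),
        // until the frontier is empty or program.nextRound(ctx) halts ...

        program.after(ctx); // one-time reporting/cleanup.

    }

}
}}}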
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java 2013-10-18 15:34:43 UTC (rev 7460)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java 2013-10-19 20:01:51 UTC (rev 7461)
@@ -51,6 +51,8 @@
/**
* {@link #reset()} the computation state and populate the initial frontier.
*
+ * @param ctx
+ * The execution context.
* @param v
* One or more vertices that will be included in the initial
* frontier.
@@ -58,7 +60,7 @@
* @throws IllegalArgumentException
* if no vertices are specified.
*/
- void init(Value... v);
+ void setFrontier(IGASContext<VS, ES, ST> ctx, Value... v);
/**
* Discard computation state (the frontier, vertex state, and edge state)
@@ -227,5 +229,19 @@
* TODO RDR : Link to an RDR wiki page as well.
*/
Statement decodeStatement(Value v);
+
+ /**
+ * Return -1, 0, or 1 if <code>u</code> is LT, EQ, or GT <code>v</code>. A
+ * number of GAS programs depend on the ability to place an order over the
+ * vertex identifiers, as does 2D partitioning. The ordering provided by
+ * this method MAY be arbitrary, but it MUST be total and stable across the
+ * life-cycle of the GAS program evaluation.
+ *
+ * @param u
+ * A vertex.
+ * @param v
+ * Another vertex.
+ */
+ int compareTo(Value u, Value v);
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGraphAccessor.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGraphAccessor.java 2013-10-18 15:34:43 UTC (rev 7460)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGraphAccessor.java 2013-10-19 20:01:51 UTC (rev 7461)
@@ -31,18 +31,37 @@
public interface IGraphAccessor {
/**
- * Return the edges for the vertex.
+ * Return the #of edges of the specified type for the given vertex.
+ * <p>
+ * Note: This is not always a flyweight operation due to the need to filter
+ * for only the observable edge types. If this information is required, it
+ * may be best to cache it on the vertex state object for a given
+ * {@link IGASProgram}.
*
- * @param p
+ * @param ctx
* The {@link IGASContext}.
* @param u
* The vertex.
* @param edges
* Typesafe enumeration indicating which edges should be visited.
- *
+ *
* @return An iterator that will visit the edges for that vertex.
*/
- Iterator<Statement> getEdges(IGASContext<?, ?, ?> p, Value u,
+ long getEdgeCount(IGASContext<?, ?, ?> ctx, Value u, EdgesEnum edges);
+
+ /**
+ * Return the edges for the given vertex.
+ *
+ * @param ctx
+ * The {@link IGASContext}.
+ * @param u
+ * The vertex.
+ * @param edges
+ * Typesafe enumeration indicating which edges should be visited.
+ *
+ * @return An iterator that will visit the edges for that vertex.
+ */
+ Iterator<Statement> getEdges(IGASContext<?, ?, ?> ctx, Value u,
EdgesEnum edges);
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IReducer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IReducer.java 2013-10-18 15:34:43 UTC (rev 7460)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IReducer.java 2013-10-19 20:01:51 UTC (rev 7461)
@@ -37,7 +37,7 @@
* The result from applying the procedure to a single index
* partition.
*/
- public void visit(IGASState<VS, ES, ST> ctx, Value u);
+ public void visit(IGASState<VS, ES, ST> state, Value u);
/**
* Return the aggregated results as an implementation dependent object.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java 2013-10-18 15:34:43 UTC (rev 7460)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java 2013-10-19 20:01:51 UTC (rev 7461)
@@ -15,16 +15,23 @@
*/
package com.bigdata.rdf.graph.analytics;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
import org.openrdf.model.Statement;
import org.openrdf.model.Value;
import com.bigdata.rdf.graph.EdgesEnum;
import com.bigdata.rdf.graph.Factory;
+import com.bigdata.rdf.graph.FrontierEnum;
import com.bigdata.rdf.graph.IGASContext;
import com.bigdata.rdf.graph.IGASScheduler;
import com.bigdata.rdf.graph.IGASState;
+import com.bigdata.rdf.graph.IReducer;
import com.bigdata.rdf.graph.impl.BaseGASProgram;
import cutthecrap.utils.striterators.IStriterator;
@@ -39,6 +46,8 @@
*/
public class BFS extends BaseGASProgram<BFS.VS, BFS.ES, Void> {
+// private static final Logger log = Logger.getLogger(BFS.class);
+
public static class VS {
/**
@@ -128,6 +137,13 @@
}
@Override
+ public FrontierEnum getInitialFrontierEnum() {
+
+ return FrontierEnum.SingleVertex;
+
+ }
+
+ @Override
public EdgesEnum getGatherEdges() {
return EdgesEnum.NoEdges;
@@ -158,7 +174,8 @@
* Not used.
*/
@Override
- public void init(final IGASState<BFS.VS, BFS.ES, Void> state, final Value u) {
+ public void initVertex(final IGASContext<BFS.VS, BFS.ES, Void> ctx,
+ final IGASState<BFS.VS, BFS.ES, Void> state, final Value u) {
state.getState(u).visit(0);
@@ -169,15 +186,20 @@
*/
@Override
public Void gather(IGASState<BFS.VS, BFS.ES, Void> state, Value u, Statement e) {
+
throw new UnsupportedOperationException();
+
}
/**
* Not used.
*/
@Override
- public Void sum(Void left, Void right) {
+ public Void sum(final IGASState<BFS.VS, BFS.ES, Void> state,
+ final Void left, final Void right) {
+
throw new UnsupportedOperationException();
+
}
/**
@@ -231,10 +253,124 @@
}
@Override
- public boolean nextRound(IGASContext<BFS.VS, BFS.ES, Void> ctx) {
+ public boolean nextRound(final IGASContext<BFS.VS, BFS.ES, Void> ctx) {
return true;
}
+ /**
+ * Reduce the active vertex state, returning a histogram reporting the #of
+ * vertices at each distance from the starting vertex. There will always be
+ * one vertex at depth zero - this is the starting vertex. For each
+ * successive depth, the #of vertices that were labeled at that depth is
+ * reported. This is essentially the same as reporting the size of the
+ * frontier in each round of the traversal, but the histogram is reported
+ * based on the vertex state.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan
+ * Thompson</a>
+ *
+ * TODO Do another reducer that reports the actual BFS tree rather
+ * than a histogram. For each depth, it needs to have the set of
+ * vertices that are at that number of hops from the starting
+ * vertex. So, there is an outer map from depth to set. The inner
+ * set should also be concurrent if we allow concurrent reduction of
+ * the activated vertex state.
+ */
+ protected static class HistogramReducer implements
+ IReducer<VS, ES, Void, Map<Integer, AtomicLong>> {
+
+ private final ConcurrentHashMap<Integer, AtomicLong> values = new ConcurrentHashMap<Integer, AtomicLong>();
+
+ @Override
+ public void visit(final IGASState<VS, ES, Void> state, final Value u) {
+
+ final VS us = state.getState(u);
+
+ if (us != null) {
+
+ final Integer depth = Integer.valueOf(us.depth());
+
+ AtomicLong newval = values.get(depth);
+
+ if (newval == null) {
+
+ final AtomicLong oldval = values.putIfAbsent(depth,
+ newval = new AtomicLong());
+
+ if (oldval != null) {
+
+ // lost data race.
+ newval = oldval;
+
+ }
+
+ }
+
+ newval.incrementAndGet();
+
+ }
+
+ }
+
+ @Override
+ public Map<Integer, AtomicLong> get() {
+
+ return Collections.unmodifiableMap(values);
+
+ }
+
+ }
+
+ @Override
+ public void after(final IGASContext<BFS.VS, BFS.ES, Void> ctx) {
+
+ final HistogramReducer r = new HistogramReducer();
+
+ ctx.getGASState().reduce(r);
+
+ class NV implements Comparable<NV> {
+ public final int n;
+ public final long v;
+ public NV(final int n, final long v) {
+ this.n = n;
+ this.v = v;
+ }
+ @Override
+ public int compareTo(final NV o) {
+ if (o.n > this.n)
+ return -1;
+ if (o.n < this.n)
+ return 1;
+ return 0;
+ }
+ }
+
+ final Map<Integer, AtomicLong> h = r.get();
+
+ final NV[] a = new NV[h.size()];
+
+ int i = 0;
+
+ for (Map.Entry<Integer, AtomicLong> e : h.entrySet()) {
+
+ a[i++] = new NV(e.getKey().intValue(), e.getValue().get());
+
+ }
+
+ Arrays.sort(a);
+
+ System.out.println("distance, frontierSize, sumFrontierSize");
+ long sum = 0L;
+ for (NV t : a) {
+
+ System.out.println(t.n + ", " + t.v + ", " + sum);
+
+ sum += t.v;
+
+ }
+
+ }
+
}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/CC.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/CC.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/CC.java 2013-10-19 20:01:51 UTC (rev 7461)
@@ -0,0 +1,411 @@
+/**
+ Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+package com.bigdata.rdf.graph.analytics;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.log4j.Logger;
+import org.openrdf.model.Statement;
+import org.openrdf.model.Value;
+
+import com.bigdata.rdf.graph.EdgesEnum;
+import com.bigdata.rdf.graph.Factory;
+import com.bigdata.rdf.graph.FrontierEnum;
+import com.bigdata.rdf.graph.IGASContext;
+import com.bigdata.rdf.graph.IGASScheduler;
+import com.bigdata.rdf.graph.IGASState;
+import com.bigdata.rdf.graph.IReducer;
+import com.bigdata.rdf.graph.impl.BaseGASProgram;
+
+import cutthecrap.utils.striterators.IStriterator;
+
+/**
+ * Connected components computes the distinct, non-overlapping connected
+ * subgraphs within a graph. All vertices within a connected component are
+ * connected to one another along at least one path.
+ * <p>
+ * The implementation works by assigning a label to each vertex. The label is
+ * initially the vertex identifier for that vertex. The labels in the graph are
+ * then relaxed with each vertex taking the minimum of its one-hop neighbors'
+ * labels. The algorithm halts when no vertex label has changed state in a given
+ * iteration.
+ *
+ * <dl>
+ * <dt>init</dt>
+ * <dd>All vertices are inserted into the initial frontier.</dd>
+ * <dt>Gather</dt>
+ * <dd>Report the source vertex label (not its identifier)</dd>
+ * <dt>Apply</dt>
+ * <dd>label = min(label,gatherLabel)</dd>
+ * <dt>Scatter</dt>
+ * <dd>iff the label has changed</dd>
+ * </dl>
+ *
+ * FIXME CC : Implement. Should push the updates through the scatter function.
+ * Find an abstraction to support this pattern. It is used by both CC and SSSP.
+ * (We can initially implement this as a Gather (over all edges) plus a
+ * conditional Scatter (over all edges iff the vertex label has changed). We can
+ * then refactor both this class and SSSP to push the updates through a Scatter
+ * (what I think of as a Gather to a remote vertex).)
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class CC extends BaseGASProgram<CC.VS, CC.ES, Value> {
+
+ private static final Logger log = Logger.getLogger(CC.class);
+
+ public static class VS {
+
+ /**
+ * The label for the vertex. This value is initially the vertex
+ * identifier. It is relaxed by the computation until it is the minimum
+ * vertex identifier for the connected component.
+ */
+ private final AtomicReference<Value> label;
+
+ /**
+ * <code>true</code> iff the label was modified.
+ */
+ private boolean changed = false;
+
+ public VS(final Value v) {
+
+ this.label = new AtomicReference<Value>(v);
+
+ }
+
+ /**
+ * The assigned label for this vertex. Once converged, all vertices in a
+ * given connected component will have the same label and the labels
+ * assigned to the vertices in each connected component will be
+ * distinct. The labels themselves are just the identifier of a vertex
+ * in that connected component. Conceptually, either the MIN or the MAX
+ * over the vertex identifiers in the connected component can be used by
+ * the algorithm since both will provide a unique labeling strategy.
+ */
+ public Value getLabel() {
+
+ return label.get();
+
+ }
+
+ private void setLabel(final Value v) {
+
+ label.set(v);
+
+ }
+
+ @Override
+ public String toString() {
+ return "{label=" + label + ",changed=" + changed + "}";
+ }
+
+ }// class VS
+
+ /**
+ * Edge state is not used.
+ */
+ public static class ES {
+
+ }
+
+ private static final Factory<Value, CC.VS> vertexStateFactory = new Factory<Value, CC.VS>() {
+
+ @Override
+ public CC.VS initialValue(final Value value) {
+
+ return new VS(value);
+
+ }
+
+ };
+
+ @Override
+ public Factory<Value, CC.VS> getVertexStateFactory() {
+
+ return vertexStateFactory;
+
+ }
+
+ @Override
+ public Factory<Statement, CC.ES> getEdgeStateFactory() {
+
+ return null;
+
+ }
+
+ @Override
+ public FrontierEnum getInitialFrontierEnum() {
+
+ return FrontierEnum.AllVertices;
+
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Overridden to not impose any filter on the sampled vertices (it does not
+ * matter whether they have any connected edges since we need to put all
+ * vertices into the initial frontier).
+ */
+ @Override
+ public EdgesEnum getSampleEdgesFilter() {
+
+ return EdgesEnum.NoEdges;
+
+ }
+
+ @Override
+ public EdgesEnum getGatherEdges() {
+
+ return EdgesEnum.AllEdges;
+
+ }
+
+ @Override
+ public EdgesEnum getScatterEdges() {
+
+ return EdgesEnum.AllEdges;
+
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Overridden to only visit the edges of the graph.
+ */
+ @Override
+ public IStriterator constrainFilter(
+ final IGASContext<CC.VS, CC.ES, Value> ctx, final IStriterator itr) {
+
+ return itr.addFilter(getEdgeOnlyFilter(ctx));
+
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Return the label of the remote vertex.
+ */
+ @Override
+ public Value gather(final IGASState<CC.VS, CC.ES, Value> state,
+ final Value u, final Statement e) {
+
+ final Value v = state.getOtherVertex(u, e);
+
+ final CC.VS vs = state.getState(v);
+
+ return vs.getLabel();
+
+ }
+
+ /**
+ * MIN
+ * <p>
+ * {@inheritDoc}
+ */
+ @Override
+ public Value sum(final IGASState<CC.VS, CC.ES, Value> state,
+ final Value left, final Value right) {
+
+ // MIN(left,right)
+ if (state.compareTo(left, right) < 0) {
+
+ return left;
+
+ }
+
+ return right;
+
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Compute the new value for this vertex, making a note of the last change
+ * for this vertex.
+ */
+ @Override
+ public CC.VS apply(final IGASState<CC.VS, CC.ES, Value> state,
+ final Value u, final Value sum) {
+
+ final CC.VS us = state.getState(u);
+
+ if (sum == null) {
+
+ /*
+ * Nothing visited by Gather. No change. Vertex will be dropped from
+ * the frontier.
+ */
+
+ us.changed = false;
+
+ return null;
+
+ }
+
+ final Value oldval = us.getLabel();
+
+ // MIN(oldval,gatherSum)
+ if (state.compareTo(oldval, sum) <= 0) {
+
+ us.changed = false;
+
+ if (log.isDebugEnabled())
+ log.debug(" NO CHANGE: " + u + ", val=" + oldval);
+
+ } else {
+
+ us.setLabel(sum);
+
+ us.changed = true;
+
+ if (log.isDebugEnabled())
+ log.debug("DID CHANGE: " + u + ", old=" + oldval + ", new="
+ + sum);
+
+ }
+
+ return us;
+
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Returns <code>true</code> iff the la...
[truncated message content] |
|
From: <mrp...@us...> - 2013-10-25 16:27:04
|
Revision: 7483
http://bigdata.svn.sourceforge.net/bigdata/?rev=7483&view=rev
Author: mrpersonick
Date: 2013-10-25 16:26:56 +0000 (Fri, 25 Oct 2013)
Log Message:
-----------
fixed ticket 761: Cardinality problem with ArbitraryLengthPathOp
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestPropertyPaths.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-1.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-1.srx
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.srx
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-3.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-3.srx
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-4.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-5.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-6.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-6.srx
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-7.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-7.srx
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-7.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths.ttl
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2013-10-25 13:20:15 UTC (rev 7482)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2013-10-25 16:26:56 UTC (rev 7483)
@@ -27,10 +27,12 @@
package com.bigdata.bop.paths;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
@@ -265,9 +267,10 @@
while (sitr.hasNext()) {
final IBindingSet[] chunk = sitr.next();
-
- for (IBindingSet bs : chunk)
- processChunk(new IBindingSet[] { bs });
+ processChunk(chunk);
+
+// for (IBindingSet bs : chunk)
+// processChunk(new IBindingSet[] { bs });
}
@@ -320,12 +323,51 @@
log.debug("gearing: " + gearing);
}
+ final boolean noInput = chunkIn == null || chunkIn.length == 0 ||
+ (chunkIn.length == 1 && chunkIn[0].isEmpty());
+
+ final IVariable<?> joinVar = gearing.inVar != null ?
+ gearing.inVar :
+ (gearing.outVar != null ? gearing.outVar : null);
+
+ if (log.isDebugEnabled()) {
+ log.debug("join var: " + joinVar);
+ }
+
+ /*
+ * Fix cardinality problem here
+ */
+ final Map<IConstant<?>, List<IBindingSet>> chunkInBySolutionKey =
+ noInput ? null :
+ new LinkedHashMap<IConstant<?>, List<IBindingSet>>();
+
+ if (!noInput) {
+
+ for (IBindingSet parentSolutionIn : chunkIn) {
+
+ final IConstant<?> key = joinVar != null ? parentSolutionIn.get(joinVar) : null; //newSolutionKey(gearing, parentSolutionIn);
+
+ if (log.isDebugEnabled()) {
+ log.debug("adding parent solution for joining: " + parentSolutionIn);
+ log.debug("join key: " + key);
+ }
+
+ if (!chunkInBySolutionKey.containsKey(key)) {
+ chunkInBySolutionKey.put(key, new ArrayList<IBindingSet>());
+ }
+
+ chunkInBySolutionKey.get(key).add(parentSolutionIn);
+
+ }
+
+ }
+
for (IBindingSet parentSolutionIn : chunkIn) {
if (log.isDebugEnabled())
log.debug("parent solution in: " + parentSolutionIn);
- IBindingSet childSolutionIn = parentSolutionIn.clone();
+ final IBindingSet childSolutionIn = parentSolutionIn.clone();
/*
* The seed is either a constant on the input side of
@@ -405,6 +447,9 @@
try {
+ /*
+ * TODO replace with code that does the PipelineJoins manually
+ */
runningSubquery = queryEngine.eval(subquery,
nextRoundInput.toArray(new IBindingSet[nextRoundInput.size()]));
@@ -550,103 +595,322 @@
}
} // fixed point for loop
-
+
/*
- * Do some final filtering and then send the solutions
- * down the pipeline.
+ * Handle the case where there is a constant on the output side of
+ * the subquery. Make sure the solution's transitive output
+ * variable matches. Filter out solutions where tVarOut != outConst.
*/
- final Iterator<Map.Entry<SolutionKey, IBindingSet>> it =
- solutionsOut.entrySet().iterator();
-
- while (it.hasNext()) {
-
- final Map.Entry<SolutionKey, IBindingSet> entry = it.next();
-
- final IBindingSet bs = entry.getValue();
-
- if (log.isDebugEnabled()) {
- log.debug("considering possible solution: " + bs);
- }
-
- if (gearing.outConst != null) {
-
+ if (gearing.outConst != null) {
+
+ final Iterator<Map.Entry<SolutionKey, IBindingSet>> it =
+ solutionsOut.entrySet().iterator();
+
+ while (it.hasNext()) {
+
+ final IBindingSet bs = it.next().getValue();
+
/*
- * Handle the case where there is a constant on the
- * output side of the subquery. Make sure the
- * solution's transitive output variable matches.
*/
if (!bs.get(gearing.tVarOut).equals(gearing.outConst)) {
if (log.isDebugEnabled()) {
log.debug("transitive output does not match output const, dropping");
+ log.debug(bs.get(gearing.tVarOut));
+ log.debug(gearing.outConst);
}
it.remove();
+ }
+
+ }
+
+ }
+
+ if (lowerBound == 0 && (gearing.inVar != null && gearing.outVar != null)) {
+
+ final Map<SolutionKey, IBindingSet> zlps = new LinkedHashMap<SolutionKey, IBindingSet>();
+
+ for (IBindingSet bs : solutionsOut.values()) {
+
+ // is this right??
+ if (bs.isBound(gearing.outVar)) {
+
continue;
}
+
+ { // left to right
+
+ final IBindingSet zlp = bs.clone();
+
+ zlp.set(gearing.tVarOut, zlp.get(gearing.inVar));
+
+ final SolutionKey key = newSolutionKey(gearing, zlp);
+
+ if (!solutionsOut.containsKey(key)) {
+
+ zlps.put(key, zlp);
+
+ }
+
+ }
+
+ { // right to left
+
+ final IBindingSet zlp = bs.clone();
+
+ zlp.set(gearing.inVar, zlp.get(gearing.tVarOut));
+
+ final SolutionKey key = newSolutionKey(gearing, zlp);
+
+ if (!solutionsOut.containsKey(key)) {
+
+ zlps.put(key, zlp);
+
+ }
+
+ }
+
+ }
+
+ solutionsOut.putAll(zlps);
+
+ }
+
+ /*
+ * We can special case when there was no input (and
+ * thus no join is required).
+ */
+ if (noInput) {
- } else { // outVar != null
-
- /*
- * Handle the case where the gearing.outVar was bound
- * coming in. Again, make sure it matches the
- * transitive output variable.
- */
- if (bs.isBound(gearing.outVar)) {
+ for (IBindingSet bs : solutionsOut.values()) {
+
+ /*
+ * Set the binding for the outVar if necessary.
+ */
+ if (gearing.outVar != null) {
- if (!bs.get(gearing.tVarOut).equals(bs.get(gearing.outVar))) {
-
- if (log.isDebugEnabled()) {
- log.debug("transitive output does not match incoming binding for output var, dropping");
- }
-
- it.remove();
-
- continue;
-
- }
-
- } else {
-
- /*
- * Handle the normal case - when we simply
- * need to copy the transitive output over to
- * the real output.
- */
bs.set(gearing.outVar, bs.get(gearing.tVarOut));
}
+ /*
+ * Clear the intermediate variables before sending the output
+ * down the pipeline.
+ */
+ bs.clear(gearing.tVarIn);
+ bs.clear(gearing.tVarOut);
+
}
- if (log.isDebugEnabled()) {
- log.debug("solution accepted");
- }
+ final IBindingSet[] chunkOut =
+ solutionsOut.values().toArray(
+ new IBindingSet[solutionsOut.size()]);
+
+ if (log.isDebugEnabled()) {
+ log.debug("final output to sink:\n" + Arrays.toString(chunkOut).replace("}, ", "},\n"));
+ }
+
+ // copy accepted binding sets to the default sink.
+ context.getSink().add(chunkOut);
- /*
- * Should we drop the intermediate variables now?
- */
- bs.clear(gearing.tVarIn);
- bs.clear(gearing.tVarOut);
-
- }
+ } else {
- final IBindingSet[] chunkOut =
- solutionsOut.values().toArray(
- new IBindingSet[solutionsOut.size()]);
-
- if (log.isDebugEnabled()) {
- log.debug("final output to sink:\n" + Arrays.toString(chunkOut));
+ final ArrayList<IBindingSet> finalOutput = new ArrayList<IBindingSet>();
+
+ final Iterator<Map.Entry<SolutionKey, IBindingSet>> it =
+ solutionsOut.entrySet().iterator();
+
+ while (it.hasNext()) {
+
+ final Map.Entry<SolutionKey, IBindingSet> entry = it.next();
+
+ final IBindingSet bs = entry.getValue();
+
+ if (log.isDebugEnabled()) {
+ log.debug("considering possible solution: " + bs);
+ }
+
+ final IConstant<?> key = joinVar != null ? bs.get(joinVar) : null;
+
+ if (key != null && chunkInBySolutionKey.containsKey(key)) {
+
+ final List<IBindingSet> parentSolutionsIn = chunkInBySolutionKey.get(key);
+
+ if (log.isDebugEnabled()) {
+ log.debug("join key: " + key);
+ log.debug("parent solutions to join: " + parentSolutionsIn);
+ }
+
+ for (IBindingSet parentSolutionIn : parentSolutionsIn) {
+
+ if (gearing.outConst != null) {
+
+ /*
+ * No need to clone, since we are not modifying the
+ * incoming parent solution in this case. The ALP
+ * is simply acting as a filter.
+ */
+ finalOutput.add(parentSolutionIn);
+
+ } else { // outVar != null
+
+ /*
+ * Handle the case where the gearing.outVar was bound
+ * coming in. Again, make sure it matches the
+ * transitive output variable.
+ */
+ if (parentSolutionIn.isBound(gearing.outVar)) {
+
+ // do this later now
+
+ if (!bs.get(gearing.tVarOut).equals(parentSolutionIn.get(gearing.outVar))) {
+
+ if (log.isDebugEnabled()) {
+ log.debug("transitive output does not match incoming binding for output var, dropping");
+ }
+
+ continue;
+
+ } else {
+
+ /*
+ * No need to clone, since we are not modifying the
+ * incoming parent solution in this case. The ALP
+ * is simply acting as a filter.
+ */
+ finalOutput.add(parentSolutionIn);
+
+ }
+
+ } else {
+
+ /*
+ * Handle the normal case - when we simply
+ * need to copy the transitive output over to
+ * the real output.
+ */
+ // bs.set(gearing.outVar, bs.get(gearing.tVarOut));
+
+ /*
+ * Clone, modify, and accept.
+ */
+ final IBindingSet join = parentSolutionIn.clone();
+
+ join.set(gearing.outVar, bs.get(gearing.tVarOut));
+
+ finalOutput.add(join);
+
+ }
+
+ }
+
+ if (log.isDebugEnabled()) {
+ log.debug("solution accepted");
+ }
+
+ }
+
+ }
+
+ /*
+ * Always do the null solutions if there are any.
+ */
+ if (chunkInBySolutionKey.containsKey(null)) {
+
+ /*
+ * Join the null solutions. These solutions represent
+ * a cross product (no shared variables with the ALP node).
+ */
+ final List<IBindingSet> nullSolutions = chunkInBySolutionKey.get(null);
+
+ if (log.isDebugEnabled()) {
+ log.debug("null solutions to join: " + nullSolutions);
+ }
+
+ for (IBindingSet nullSolution : nullSolutions) {
+
+ final IBindingSet solution;
+
+ // if ((gearing.inVar != null && !nullSolution.isBound(gearing.inVar)) ||
+ // (gearing.outVar != null && !nullSolution.isBound(gearing.outVar))) {
+ if (gearing.inVar != null || gearing.outVar != null) {
+
+ solution = nullSolution.clone();
+
+ } else {
+
+ solution = nullSolution;
+
+ }
+
+ if (gearing.inVar != null) {
+
+ if (solution.isBound(gearing.inVar)) {
+
+ /*
+ * This should never happen.
+ */
+ throw new RuntimeException();
+
+ } else {
+
+ solution.set(gearing.inVar, bs.get(gearing.inVar));
+
+ }
+
+ }
+
+ if (gearing.outVar != null) {
+
+ if (solution.isBound(gearing.outVar)) {
+
+ /*
+ * This should never happen.
+ */
+ throw new RuntimeException();
+ // if (!bs.get(gearing.tVarOut).equals(solution.get(gearing.outVar))) {
+ //
+ // // discard this solution;
+ // continue;
+ //
+ // }
+
+ } else {
+
+ solution.set(gearing.outVar, bs.get(gearing.tVarOut));
+
+ }
+
+ }
+
+ finalOutput.add(solution);
+
+ if (log.isDebugEnabled()) {
+ log.debug("solution accepted");
+ }
+
+ }
+
+ }
+
+ }
+
+ final IBindingSet[] chunkOut = finalOutput.toArray(new IBindingSet[finalOutput.size()]);
+ // solutionsOut.values().toArray(
+ // new IBindingSet[solutionsOut.size()]);
+
+ if (log.isDebugEnabled()) {
+ log.debug("final output to sink:\n" + Arrays.toString(chunkOut).replace("}, ", "},\n"));
+ }
+
+ // copy accepted binding sets to the default sink.
+ context.getSink().add(chunkOut);
+
}
-
- // copy accepted binding sets to the default sink.
- context.getSink().add(chunkOut);
- // done.
-// return runningSubquery;
-
} // processChunk method
/**
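
The essence of the fix: the incoming parent solutions are indexed by the value of the join variable, and each ALP solution is then joined against every parent sharing that key, so duplicate parents produce duplicate outputs (bag semantics) instead of being collapsed by the distinct solutionsOut map. A distilled sketch with generic stand-ins follows; the key and solution types are illustrative, where the real code works with IConstant and IBindingSet:
{{{
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BinaryOperator;
import java.util.function.Function;

// Distilled sketch of the cardinality-preserving join in processChunk().
final class AlpJoinSketch {

    static <K, S> List<S> join(final List<S> parents,
            final List<S> alpSolutions,
            final Function<S, K> joinKey, // e.g., bs -> bs.get(joinVar)
            final BinaryOperator<S> merge) { // combine parent + solution.

        // Index the parent solutions by join key, preserving duplicates.
        final Map<K, List<S>> parentsByKey = new LinkedHashMap<K, List<S>>();
        for (S p : parents)
            parentsByKey.computeIfAbsent(joinKey.apply(p),
                    k -> new ArrayList<S>()).add(p);

        // One output per (parent, ALP solution) pair with a matching key.
        final List<S> out = new ArrayList<S>();
        for (S s : alpSolutions)
            for (S p : parentsByKey.getOrDefault(joinKey.apply(s),
                    Collections.<S> emptyList()))
                out.add(merge.apply(p, s));

        return out;
    }

}
}}}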
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestPropertyPaths.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestPropertyPaths.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestPropertyPaths.java 2013-10-25 16:26:56 UTC (rev 7483)
@@ -0,0 +1,152 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2013. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+package com.bigdata.rdf.sparql.ast.eval;
+
+
+
+public class TestPropertyPaths extends AbstractDataDrivenSPARQLTestCase {
+
+ /**
+ *
+ */
+ public TestPropertyPaths() {
+ }
+
+ /**
+ * @param name
+ */
+ public TestPropertyPaths(String name) {
+ super(name);
+ }
+
+// private void property_path_test(String name) throws Exception {
+//
+// new TestHelper(
+// "property-path-734-" + name, // testURI,
+// "property-path-734-" + name + ".rq", // queryFileURL
+// "property-path-734.ttl", // dataFileURL
+// "property-path-734.srx" // resultFileURL,
+// ).runTest();
+// }
+//
+// private void property_path_using_workaround_test(String name) throws Exception {
+//
+// new TestHelper(
+// "property-path-734-B-" + name, // testURI,
+// "property-path-734-B-" + name + ".rq", // queryFileURL
+// "property-path-734-B.ttl", // dataFileURL
+// "property-path-734-B.srx" // resultFileURL,
+// ).runTest();
+// }
+
+ public void test_inVar_outConst_notBound() throws Exception {
+
+ new TestHelper(
+ "property-paths", // testURI,
+ "property-paths-1.rq", // queryFileURL
+ "property-paths-2.ttl", // dataFileURL
+ "property-paths-1.srx" // resultFileURL,
+ ).runTest();
+
+ }
+
+ public void test_inVar_outConst_inBound() throws Exception {
+
+ new TestHelper(
+ "property-paths", // testURI,
+ "property-paths-1.rq", // queryFileURL
+ "property-paths.ttl", // dataFileURL
+ "property-paths-1.srx" // resultFileURL,
+ ).runTest();
+
+ }
+
+ public void test_inVar_outVar_inBound() throws Exception {
+
+ new TestHelper(
+ "property-paths", // testURI,
+ "property-paths-2.rq", // queryFileURL
+ "property-paths.ttl", // dataFileURL
+ "property-paths-2.srx" // resultFileURL,
+ ).runTest();
+
+ }
+
+ public void test_inVar_outVar_outBound() throws Exception {
+
+ new TestHelper(
+ "property-paths", // testURI,
+ "property-paths-3.rq", // queryFileURL
+ "property-paths-2.ttl", // dataFileURL
+ "property-paths-3.srx" // resultFileURL,
+ ).runTest();
+
+ }
+
+ public void test_inVar_outVar_bothBound() throws Exception {
+
+ new TestHelper(
+ "property-paths", // testURI,
+ "property-paths-3.rq", // queryFileURL
+ "property-paths.ttl", // dataFileURL
+ "property-paths-3.srx" // resultFileURL,
+ ).runTest();
+
+ }
+
+ public void test_inConst_outConst() throws Exception {
+
+ new TestHelper(
+ "property-paths", // testURI,
+ "property-paths-4.rq", // queryFileURL
+ "property-paths.ttl", // dataFileURL
+ "property-paths-3.srx" // resultFileURL,
+ ).runTest();
+
+ }
+
+ public void test_inVar_outVar_noSharedVars() throws Exception {
+
+ new TestHelper(
+ "property-paths", // testURI,
+ "property-paths-6.rq", // queryFileURL
+ "property-paths.ttl", // dataFileURL
+ "property-paths-6.srx" // resultFileURL,
+ ).runTest();
+
+ }
+
+ public void test_inVar_outVar_someSharedVars() throws Exception {
+
+ new TestHelper(
+ "property-paths", // testURI,
+ "property-paths-7.rq", // queryFileURL
+ "property-paths-7.ttl", // dataFileURL
+ "property-paths-7.srx" // resultFileURL,
+ ).runTest();
+
+ }
+
+}
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestPropertyPaths.java
___________________________________________________________________
Added: svn:mime-type
+ text/plain
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-1.rq
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-1.rq (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-1.rq 2013-10-25 16:26:56 UTC (rev 7483)
@@ -0,0 +1,11 @@
+
+prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
+
+SELECT ?A ?val
+WHERE {
+ ?A <os:prop> <os:P> .
+ ?A rdf:value ?val .
+ ?A rdf:type / rdfs:subClassOf *
+ <os:ClassA> ;
+}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-1.srx
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-1.srx (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-1.srx 2013-10-25 16:26:56 UTC (rev 7483)
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<sparql
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:xs="http://www.w3.org/2001/XMLSchema#"
+ xmlns="http://www.w3.org/2005/sparql-results#" >
+ <head>
+ <variable name="A"/>
+ </head>
+ <results>
+ <result>
+ <binding name="A">
+ <uri>os:0</uri>
+ </binding>
+ <binding name="val">
+ <literal>x</literal>
+ </binding>
+ </result>
+ <result>
+ <binding name="A">
+ <uri>os:0</uri>
+ </binding>
+ <binding name="val">
+ <literal>y</literal>
+ </binding>
+ </result>
+ <result>
+ <binding name="A">
+ <uri>os:1</uri>
+ </binding>
+ <binding name="val">
+ <literal>z</literal>
+ </binding>
+ </result>
+ </results>
+</sparql>
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.rq
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.rq (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.rq 2013-10-25 16:26:56 UTC (rev 7483)
@@ -0,0 +1,10 @@
+
+prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
+
+SELECT ?A ?val ?type
+WHERE {
+ ?A <os:prop> <os:P> .
+ ?A rdf:value ?val .
+ ?A rdf:type / rdfs:subClassOf * ?type .
+}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.srx
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.srx (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.srx 2013-10-25 16:26:56 UTC (rev 7483)
@@ -0,0 +1,55 @@
+<?xml version="1.0"?>
+<sparql
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:xs="http://www.w3.org/2001/XMLSchema#"
+ xmlns="http://www.w3.org/2005/sparql-results#" >
+ <head>
+ <variable name="A"/>
+ </head>
+ <results>
+ <result>
+ <binding name="A">
+ <uri>os:0</uri>
+ </binding>
+ <binding name="val">
+ <literal>x</literal>
+ </binding>
+ <binding name="type">
+ <uri>os:ClassA</uri>
+ </binding>
+ </result>
+ <result>
+ <binding name="A">
+ <uri>os:0</uri>
+ </binding>
+ <binding name="val">
+ <literal>y</literal>
+ </binding>
+ <binding name="type">
+ <uri>os:ClassA</uri>
+ </binding>
+ </result>
+ <result>
+ <binding name="A">
+ <uri>os:1</uri>
+ </binding>
+ <binding name="val">
+ <literal>z</literal>
+ </binding>
+ <binding name="type">
+ <uri>os:ClassA</uri>
+ </binding>
+ </result>
+ <result>
+ <binding name="A">
+ <uri>os:1</uri>
+ </binding>
+ <binding name="val">
+ <literal>z</literal>
+ </binding>
+ <binding name="type">
+ <uri>os:ClassB</uri>
+ </binding>
+ </result>
+ </results>
+</sparql>
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.ttl
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.ttl (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/property-paths-2.ttl 2013-10-25 16:26:56 UTC (rev 7483)
@@ -0,0 +1,...
[truncated message content] |
|
From: <tho...@us...> - 2013-10-25 21:18:59
|
Revision: 7485
http://bigdata.svn.sourceforge.net/bigdata/?rev=7485&view=rev
Author: thompsonbry
Date: 2013-10-25 21:18:46 +0000 (Fri, 25 Oct 2013)
Log Message:
-----------
Resolution for #718 (ZK disconnect)
Merged back to the main development branch (revisions from r7464 to r7484 were merged back).
{{{
merge -r7464:HEAD https://bigdata.svn.sourceforge.net/svnroot/bigdata/branches/ZK_DISCONNECT_HANDLING /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN
...
C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/test/com/bigdata/zookeeper/TestAll.java
C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/zookeeper/ZooKeeperAccessor.java
C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAClient.java
C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java
C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java
...
===== File Statistics: =====
Added: 4
Updated: 41
==== Conflict Statistics: =====
File conflicts: 5
}}}
The conflicts are all the files that I had locally modified when I created the ZK_DISCONNECT_HANDLING branch. In each case, I accepted the changes from the ZK_DISCONNECT_HANDLING branch.
The HA CI test suite runs green locally (except for the 3 known failures related to #760).
Revision Links:
--------------
http://bigdata.svn.sourceforge.net/bigdata/?rev=7464&view=rev
http://bigdata.svn.sourceforge.net/bigdata/?rev=7484&view=rev
http://bigdata.svn.sourceforge.net/bigdata/?rev=7464&view=rev
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/.classpath
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractProcessCollector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j-dev.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/quorum/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/quorum/TestSingletonQuorumSemantics.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/jini/start/ManageLogicalServiceTask.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/DumpLogDigests.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/service/jini/lookup/ServiceCache.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/zookeeper/ZooKeeperAccessor.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3DumpLogs.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServerWithHALogs.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/dumpFile.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/quorum/zk/AbstractZkQuorumTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/quorum/zk/MockQuorumMember.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/quorum/zk/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/quorum/zk/TestZkQuorum.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/quorum/zk/TestZkSingletonQuorumSemantics.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/zookeeper/AbstractZooTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/zookeeper/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/zookeeper/TestZLockImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/CreateKBTask.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java
branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/lib/apache/zookeeper-3.3.3.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/quorum/zk/TestSplitZPath.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/zookeeper/TestZookeeperSessionSemantics.java
Property Changed:
----------------
branches/BIGDATA_RELEASE_1_3_0/
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd/
branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility/
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr/
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco/
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/util/config/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc/src/resources/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/lubm/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot/src/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/error/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/relation/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/util/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/samples/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/internal/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/relation/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/LEGAL/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/lib/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/it/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/it/unimi/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/it/unimi/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/it/unimi/dsi/
branches/BIGDATA_RELEASE_1_3_0/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/
branches/BIGDATA_RELEASE_1_3_0/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/
branches/BIGDATA_RELEASE_1_3_0/osgi/
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/config/
Property changes on: branches/BIGDATA_RELEASE_1_3_0
___________________________________________________________________
Modified: svn:mergeinfo
- /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785
/branches/BIGDATA_RELEASE_1_2_0:6766-7380
/branches/BTREE_BUFFER_BRANCH:2004-2045
/branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782
/branches/INT64_BRANCH:4486-4522
/branches/JOURNAL_HA_BRANCH:2596-4066
/branches/LARGE_LITERALS_REFACTOR:4175-4387
/branches/LEXICON_REFACTOR_BRANCH:2633-3304
/branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE:7215-7271
/branches/RWSTORE_1_1_0_DEBUG:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836
/branches/bugfix-btm:2594-3237
/branches/dev-btm:2574-2730
/branches/fko:3150-3194
/trunk:3392-3437,3656-4061
+ /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785
/branches/BIGDATA_RELEASE_1_2_0:6766-7380
/branches/BTREE_BUFFER_BRANCH:2004-2045
/branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782
/branches/INT64_BRANCH:4486-4522
/branches/JOURNAL_HA_BRANCH:2596-4066
/branches/LARGE_LITERALS_REFACTOR:4175-4387
/branches/LEXICON_REFACTOR_BRANCH:2633-3304
/branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE:7215-7271
/branches/RWSTORE_1_1_0_DEBUG:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836
/branches/ZK_DISCONNECT_HANDLING:7465-7484
/branches/bugfix-btm:2594-3237
/branches/dev-btm:2574-2730
/branches/fko:3150-3194
/trunk:3392-3437,3656-4061
Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.classpath 2013-10-25 20:18:24 UTC (rev 7484)
+++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2013-10-25 21:18:46 UTC (rev 7485)
@@ -33,7 +33,7 @@
<classpathentry kind="src" path="bigdata-gas/src/test"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/dsi-utils-1.0.6-020610.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/lgpl-utils-1.0.6-020610.jar"/>
- <classpathentry kind="lib" path="bigdata-jini/lib/apache/zookeeper-3.4.5.jar"/>
+ <classpathentry kind="lib" path="bigdata-jini/lib/apache/zookeeper-3.3.3.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-continuation-7.2.2.v20101205.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-http-7.2.2.v20101205.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-io-7.2.2.v20101205.jar"/>
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty
___________________________________________________________________
Modified: svn:mergeinfo
- /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380
/branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/lib/jetty:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836
+ /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380
/branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/lib/jetty:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836
/branches/ZK_DISCONNECT_HANDLING/bigdata/lib/jetty:7465-7484
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate
___________________________________________________________________
Modified: svn:mergeinfo
- /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836
+ /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836
/branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/aggregate:7465-7484
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph
___________________________________________________________________
Modified: svn:mergeinfo
- /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836
+ /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836
/branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/joinGraph:7465-7484
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util
___________________________________________________________________
Modified: svn:mergeinfo
- /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836
+ /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836
/branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/util:7465-7484
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractProcessCollector.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractProcessCollector.java 2013-10-25 20:18:24 UTC (rev 7484)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractProcessCollector.java 2013-10-25 21:18:46 UTC (rev 7485)
@@ -77,10 +77,13 @@
}
/**
+ * {@inheritDoc}
+ * <p>
* Creates the {@link ActiveProcess} and the
- * {@link ActiveProcess#start(com.bigdata.counters.AbstractStatisticsCollector.AbstractProcessReader)}s
- * it passing in the value returned by the {@link #getProcessReader()}
+ * {@link ActiveProcess#start(AbstractProcessReader)}s it passing in the
+ * value returned by the {@link #getProcessReader()}
*/
+ @Override
public void start() {
log.info("");
@@ -91,6 +94,7 @@
}
+ @Override
public void stop() {
log.info("");
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba
___________________________________________________________________
Modified: svn:mergeinfo
- /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/htree/raba:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836
+ /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/htree/raba:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836
/branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/htree/raba:7465-7484
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-10-25 20:18:24 UTC (rev 7484)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-10-25 21:18:46 UTC (rev 7485)
@@ -3293,7 +3293,7 @@
* The next offset at which user data would be written.
* Calculated, after commit!
*/
- nextOffset = _bufferStrategy.getNextOffset();
+ nextOffset = _bufferStrategy.getNextOffset();
final long blockSequence;
@@ -3468,9 +3468,16 @@
* the met quorum; and (b) voted YES in response to
* the PREPARE message.
*/
+ try {
+ quorumService.abort2Phase(commitToken);
+ } finally {
+ throw new RuntimeException(
+ "PREPARE rejected: nyes="
+ + resp.getYesCount()
+ + ", replicationFactor="
+ + resp.replicationFactor());
+ }
- quorumService.abort2Phase(commitToken);
-
}
} catch (Throwable e) {
@@ -5422,8 +5429,17 @@
if (quorum == null)
return;
- // This quorum member.
- final QuorumService<HAGlue> localService = quorum.getClient();
+ // The HAQuorumService (if running).
+ final QuorumService<HAGlue> localService;
+ {
+ QuorumService<HAGlue> t;
+ try {
+ t = quorum.getClient();
+ } catch (IllegalStateException ex) {
+ t = null;
+ }
+ localService = t;
+ }
// Figure out the state transitions involved.
final QuorumTokenTransitions transitionState = new QuorumTokenTransitions(
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166
___________________________________________________________________
Modified: svn:mergeinfo
- /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/jsr166:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/jsr166:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/jsr166:4814-4836
+ /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/jsr166:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/jsr166:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/jsr166:4814-4836
/branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/jsr166:7465-7484
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-10-25 20:18:24 UTC (rev 7484)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-10-25 21:18:46 UTC (rev 7485)
@@ -31,6 +31,7 @@
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
@@ -48,7 +49,6 @@
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
@@ -301,7 +301,9 @@
* Note: This is volatile to allow visibility without holding the
* {@link #lock}. The field is only modified in {@link #start(QuorumClient)}
* and {@link #terminate()}, and those methods use the {@link #lock} to
- * impose an appropriate ordering over events.
+ * impose an appropriate ordering over events. The quorum is running iff
+ * there is a client for which it is delivering events. When <code>null</code>,
+ * the quorum is not running.
*
* @see #start(QuorumClient)
*/
@@ -582,71 +584,72 @@
*/
interruptAll();
if (client == null) {
- // No client is attached.
+ // No client? Not running.
return;
}
if (log.isDebugEnabled())
log.debug("client=" + client);
- if (client instanceof QuorumMember<?>) {
- /*
- * Update the distributed quorum state by removing our client
- * from the set of member services. This will also cause a
- * service leave, pipeline leave, and any vote to be withdrawn.
- *
- * We have observed Condition spins during terminate() that
- * result in HAJournalServer hangs. This runs another Thread
- * that will interrupt this Thread if the quorum member is
- * unable to complete the memberRemove() within a timeout.
- *
- * Note: Since we are holding the lock in the current thread, we
- * MUST execute memberRemove() in this thread (it requires the
- * lock). Therefore, I have used a 2nd thread that will
- * interrupt this thread if it does not succeed in a polite
- * removal from the quorum within a timeout.
- */
- {
- final long MEMBER_REMOVE_TIMEOUT = 5000;// ms.
- final AtomicBoolean didRemove = new AtomicBoolean(false);
- final Thread self = Thread.currentThread();
- final Thread t = new Thread() {
- public void run() {
- try {
- Thread.sleep(MEMBER_REMOVE_TIMEOUT);
- } catch (InterruptedException e) {
- // Expected. Ignored.
- return;
- }
- if (!didRemove.get()) {
- log.error("Timeout awaiting quorum member remove.");
- self.interrupt();
- }
- }
- };
- t.setDaemon(true);
- t.start();
- try {
- // Attempt memberRemove() (interruptably).
- actor.memberRemoveInterruptable();
- didRemove.set(true); // Success.
- } catch (InterruptedException e) {
- // Propagate the interrupt.
- Thread.currentThread().interrupt();
- } finally {
- t.interrupt(); // Stop execution of [t].
+// if (clie...
[truncated message content] |
|
From: <tho...@us...> - 2013-10-30 19:19:49
|
Revision: 7501
http://bigdata.svn.sourceforge.net/bigdata/?rev=7501&view=rev
Author: thompsonbry
Date: 2013-10-30 19:19:39 +0000 (Wed, 30 Oct 2013)
Log Message:
-----------
I have reviewed and revised the following aspects of the commit and 2-phase commit protocol:
- Journal: cancellation of the Future in the GATHER consensus protocol is no longer logged as an ERROR. This is just a data race.
- HAJournalServer: enterErrorState() is now written defensively. It will not throw anything out, even if the HAQuorumService is not running (e.g., after quorum.terminate()).
- HAJournalServer.HAQuorumService.start()/terminate() now use an AtomicBoolean to guard those methods and to make enterErrorState() a NOP while the HAQuorumService is inside either of them (a minimal sketch of this guard pattern follows this message).
- HAJournalServer.logRootBlock() no longer has the isJoinedService boolean. This method is only called in the 2-phase commit logic and the 2-phase commit is only executed for services that were joined with the met quorum as of the atomic decision point in commitNow().
- commitNow(): refactored to use a CommitState object, pushing the different steps of the commit down into methods on that object. The error handling for the two-phase commit logic was simplified to make it easier to understand.
- The Prepare2Phase and Commit2Phase tasks were simplified. The core code was pushed down into private inner methods. This makes it easier to analyze the error handling code paths.
- TestDumpJournal: added coverage of additional conditions in an attempt to replicate an error observed on the HA3 cluster. I was not able to replicate the problem. It may have been related to allocator recycling or an abnormal failure mode on bigdata17, as per this ticket:
{{{
ERROR: 2885231 2013-10-28 16:05:28,194 qtp230584058-49 com.bigdata.rdf.sail.webapp.StatusServlet.doGet(StatusServlet.java:863): java.lang.RuntimeException: java.lang.ClassCastException: com.bigdata.btree.BTree cannot be cast to com.bigdata.journal.Name2Addr
java.lang.RuntimeException: java.lang.ClassCastException: com.bigdata.btree.BTree cannot be cast to com.bigdata.journal.Name2Addr
at com.bigdata.journal.DumpJournal.dumpNamedIndicesMetadata(DumpJournal.java:726)
at com.bigdata.journal.DumpJournal.dumpJournal(DumpJournal.java:599)
at com.bigdata.rdf.sail.webapp.StatusServlet.doGet(StatusServlet.java:485)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:534)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:475)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:929)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:403)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:864)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:117)
at org.eclipse.jetty.server.handler.HandlerList.handle(HandlerList.java:47)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:114)
at org.eclipse.jetty.server.Server.handle(Server.java:352)
at org.eclipse.jetty.server.HttpConnection.handleRequest(HttpConnection.java:596)
at org.eclipse.jetty.server.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:1051)
at org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:590)
at org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:212)
at org.eclipse.jetty.server.HttpConnection.handle(HttpConnection.java:426)
at org.eclipse.jetty.io.nio.SelectChannelEndPoint.handle(SelectChannelEndPoint.java:508)
at org.eclipse.jetty.io.nio.SelectChannelEndPoint.access$000(SelectChannelEndPoint.java:34)
at org.eclipse.jetty.io.nio.SelectChannelEndPoint$1.run(SelectChannelEndPoint.java:40)
at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:451)
at java.lang.Thread.run(Thread.java:724)
Caused by: java.lang.ClassCastException: com.bigdata.btree.BTree cannot be cast to com.bigdata.journal.Name2Addr
at com.bigdata.journal.AbstractJournal.getIndexWithCommitRecord(AbstractJournal.java:4751)
at com.bigdata.journal.DumpJournal.dumpNamedIndicesMetadata(DumpJournal.java:706)
... 23 more
}}}
TODO:
- Review abort2Phase(). For some invocation contexts, this should be restricted to services that were joined with the met quorum and that voted YES for the PREPARE message.
- QuorumCommitImpl: I have reviewed the methods, including the cancellation of remote futures and the error paths. I modified the class to use the local executor service to submit RMI requests in parallel, and simplified the code paths for interrupt and error handling for RMI requests and for failures to locate the proxy for a service. However, the new code causes some problems with the HA CI test suite and is being withheld while I investigate those issues in more depth.
I have run through the HA CI test suite, the RWStore test suite, the WORM test suite, and TestBigdataSailWithQuads. All are green.
See #760
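To make the guard described above concrete, the following is a minimal sketch of the AtomicBoolean pattern; the class name, fields, and method bodies are illustrative assumptions, not the actual HAJournalServer source:
{{{
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical stand-in for HAJournalServer.HAQuorumService (sketch only).
public class GuardedQuorumService {

    /** true only while the service is fully started. */
    private final AtomicBoolean serviceRunning = new AtomicBoolean(false);

    public void start() {
        // ... acquire resources, register listeners, etc. ...
        serviceRunning.set(true); // enterErrorState() is effective from here on.
    }

    public void terminate() {
        serviceRunning.set(false); // enterErrorState() is a NOP from here on.
        // ... release resources, deregister listeners, etc. ...
    }

    /**
     * Defensive: a NOP unless the service is running, and never propagates
     * an exception to the caller.
     */
    public void enterErrorState() {
        if (!serviceRunning.get()) {
            return; // NOP while inside start()/terminate() or when stopped.
        }
        try {
            // ... submit the error-handling task ...
        } catch (Throwable t) {
            // Trap and ignore: nothing may be thrown out of this method.
        }
    }
}
}}}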
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Journal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java 2013-10-30 19:11:05 UTC (rev 7500)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java 2013-10-30 19:19:39 UTC (rev 7501)
@@ -160,19 +160,21 @@
* onto the {@link HALogWriter}.
* <p>
* Note: This method is ONLY invoked as part of the 2-phase commit protocol.
- * Therefore, it ONLY applies to the live HALog file. A service is
+ * Therefore, this method ONLY applies to the live HALog file. A service is
* atomically either joined with the met quorum at a 2-phase commit point or
- * not joined. This information is passed through from the 2-phase prepare
- * in the <i>isJoinedService</i> argument.
+ * not joined. The PREPARE and COMMIT messages are ONLY generated for
+ * services that were joined with the met quorum as of that atomic decision
+ * point in the commit protocol. Therefore, this method is never called for
+ * a service that was not joined as of that atomic decision point.
*
- * @param isJoinedService
- * <code>true</code> iff the service was joined with the met
- * quorum at the atomic decision point in the 2-phase commit
- * protocol.
* @param rootBlock
* The root block for the commit point that was just achieved.
*/
- void logRootBlock(final boolean isJoinedService,
+// * @param isJoinedService
+// * <code>true</code> iff the service was joined with the met
+// * quorum at the atomic decision point in the 2-phase commit
+// * protocol.
+ void logRootBlock(//final boolean isJoinedService,
final IRootBlockView rootBlock) throws IOException;
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2013-10-30 19:11:05 UTC (rev 7500)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2013-10-30 19:19:39 UTC (rev 7501)
@@ -145,10 +145,10 @@
}
@Override
- public void logRootBlock(final boolean isJoinedService,
+ public void logRootBlock(//final boolean isJoinedService,
final IRootBlockView rootBlock) throws IOException {
- QuorumServiceBase.this.logRootBlock(isJoinedService, rootBlock);
+ QuorumServiceBase.this.logRootBlock(/*isJoinedService,*/ rootBlock);
}
@@ -294,7 +294,7 @@
* Note: The default implementation is a NOP.
*/
@Override
- public void logRootBlock(final boolean isJoinedService,
+ public void logRootBlock(//final boolean isJoinedService,
final IRootBlockView rootBlock) throws IOException {
// NOP
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-10-30 19:11:05 UTC (rev 7500)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-10-30 19:19:39 UTC (rev 7501)
@@ -144,6 +144,7 @@
import com.bigdata.io.IDataRecord;
import com.bigdata.io.IDataRecordAccess;
import com.bigdata.io.SerializerUtil;
+import com.bigdata.io.writecache.WriteCacheService;
import com.bigdata.journal.Name2Addr.Entry;
import com.bigdata.mdi.IResourceMetadata;
import com.bigdata.mdi.JournalMetadata;
@@ -164,6 +165,7 @@
import com.bigdata.rwstore.IAllocationManager;
import com.bigdata.rwstore.IHistoryManager;
import com.bigdata.rwstore.IRWStrategy;
+import com.bigdata.rwstore.RWStore;
import com.bigdata.rwstore.sector.MemStrategy;
import com.bigdata.rwstore.sector.MemoryManager;
import com.bigdata.service.AbstractHATransactionService;
@@ -2765,23 +2767,25 @@
*/
_bufferStrategy.abort();
- /*
- * Discard hard references to any indices. The Name2Addr reference
- * will also be discarded below. This should be sufficient to ensure
- * that any index requested by the methods on the AbstractJournal
- * will be re-read from disk using the commit record which we
- * re-load below. This is necessary in order to discard any
- * checkpoints that may have been written on indices since the last
- * commit.
- *
- * FIXME Verify this is not required. Historical index references
- * should not be discarded on abort as they remain valid. Discarding
- * them admits the possibility of a non-canonicalizing cache for the
- * historical indices since an existing historical index reference
- * will continue to be held but a new copy of the index will be
- * loaded on the next request if we clear the cache here.
- */
+ /*
+ * The Name2Addr reference will be discarded below. This should be
+ * sufficient to ensure that any index requested by the methods on
+ * the AbstractJournal will be re-read from disk using the commit
+ * record which we re-load below. This is necessary in order to
+ * discard any checkpoints that may have been written on indices
+ * since the last commit (dirty indices that have not been
+ * checkpointed to the disk are discarded when we discard
+ * Name2Addr).
+ *
+ * Note: Historical index references should NOT be discarded on
+ * abort as they remain valid. Discarding them admits the
+ * possibility of a non-canonicalizing cache for the historical
+ * indices since an existing historical index reference will
+ * continue to be held but a new copy of the index will be loaded on
+ * the next request if we clear the cache here.
+ */
// historicalIndexCache.clear();
+
// discard the commit record and re-read from the store.
_commitRecord = _getCommitRecord();
@@ -3030,165 +3034,192 @@
}
- /**
- * An atomic commit is performed by directing each registered
- * {@link ICommitter} to flush its state onto the store using
- * {@link ICommitter#handleCommit(long)}. The address returned by that
- * method is the address from which the {@link ICommitter} may be reloaded
- * (and its previous address if its state has not changed). That address is
- * saved in the {@link ICommitRecord} under the index for which that
- * committer was {@link #registerCommitter(int, ICommitter) registered}. We
- * then force the data to stable store, update the root block, and force the
- * root block and the file metadata to stable store.
- * <p>
- * Note: Each invocation of this method MUST use a distinct
- * <i>commitTime</i> and the commitTimes MUST be monotonically increasing.
- * These guarantees support both the database version history mechanisms and
- * the High Availability mechanisms.
- *
- * @param commitTime
- * The commit time either of a transaction or of an unisolated
- * commit. Note that when mixing isolated and unisolated commits
- * you MUST use the same {@link ITimestampService} for both
- * purposes.
- *
- * @return The timestamp assigned to the commit record -or- 0L if there were
- * no data to commit.
- */
- // Note: Overridden by StoreManager (DataService).
- protected long commitNow(final long commitTime) {
-
- final WriteLock lock = _fieldReadWriteLock.writeLock();
+ /**
+ * Class to which we attach all of the little pieces of state during
+ * {@link AbstractJournal#commitNow(long)}.
+ * <p>
+ * The non-final fields in this class are laid directly below the method
+ * which set those fields. The methods in the class are laid out in the
+ * top-to-bottom order in which they are executed by commitNow().
+ */
+ static private class CommitState {
+
+ /**
+ * The timestamp at which the commit began.
+ */
+ private final long beginNanos;
- lock.lock();
+ /**
+ * The backing store.
+ */
+ private final AbstractJournal store;
- try {
-
- assertOpen();
+ /**
+ * The backing {@link IBufferStrategy} for the {@link #store}.
+ */
+ private final IBufferStrategy _bufferStrategy;
+
+ /**
+ * The quorum iff HA and <code>null</code> otherwise.
+ */
+ private final Quorum<HAGlue, QuorumService<HAGlue>> quorum;
- final long beginNanos = System.nanoTime();
+ /**
+ * Local HA service implementation (non-Remote) and <code>null</code> if
+ * not in an HA mode.
+ */
+ private final QuorumService<HAGlue> quorumService;
+
+ /**
+ * The commit time either of a transaction or of an unisolated commit.
+ * Note that when mixing isolated and unisolated commits you MUST use
+ * the same {@link ITimestampService} for both purposes.
+ */
+ private final long commitTime;
+
+ /**
+ * The current root block on the journal as of the start of the commit
+ * protocol.
+ */
+ private final IRootBlockView old;
- // #of bytes on the journal as of the previous commit point.
- final long byteCountBefore = _rootBlock.getNextOffset();
+ /**
+ * The quorum token associated with this commit point.
+ */
+ private final long commitToken;
+
+ /** The #of bytes on the journal as of the previous commit point. */
+ private final long byteCountBefore;
+
+ /**
+ * The commit counter that will be assigned to the new commit point.
+ */
+ private final long newCommitCounter;
+
+ /**
+ *
+ * @param store
+ * The backing store.
+ * @param commitTime
+ * The commit time either of a transaction or of an
+ * unisolated commit. Note that when mixing isolated and
+ * unisolated commits you MUST use the same
+ * {@link ITimestampService} for both purposes.
+ */
+ public CommitState(final AbstractJournal store, final long commitTime) {
- if (log.isInfoEnabled())
- log.info("commitTime=" + commitTime);
+ if (store == null)
+ throw new IllegalArgumentException();
+
+ this.beginNanos = System.nanoTime();
- assertCommitTimeAdvances(commitTime);
+ this.store = store;
- final IRootBlockView old = _rootBlock;
+ this.commitTime = commitTime;
- final long newCommitCounter = old.getCommitCounter() + 1;
+ this._bufferStrategy = store._bufferStrategy;
- /*
- * First, run each of the committers accumulating the updated root
- * addresses in an array. In general, these are btrees and they may
- * have dirty nodes or leaves that needs to be evicted onto the
- * store. The first time through, any newly created btrees will have
- * dirty empty roots (the btree code does not optimize away an empty
- * root at this time). However, subsequent commits without
- * intervening data written on the store should not cause any
- * committers to update their root address.
+ // Note: null if not HA.
+ this.quorum = store.quorum;
+
+ /*
+ * Local HA service implementation (non-Remote).
*
- * Note: This also checkpoints the deferred free block list.
- */
- final long[] rootAddrs = notifyCommitters(commitTime);
+ * Note: getClient() throws IllegalStateException if quorum exists
+ * and is not running.
+ */
+ this.quorumService = quorum == null ? null : quorum.getClient();
- /*
- * See if anything has been written on the store since the last
- * commit.
- */
- if (!_bufferStrategy.requiresCommit(_rootBlock)) {
+ this.old = store._rootBlock;
- /*
- * No data was written onto the store so the commit can not
- * achieve any useful purpose.
- */
+ // #of bytes on the journal as of the previous commit point.
+ this.byteCountBefore = store._rootBlock.getNextOffset();
- if (log.isInfoEnabled())
- log.info("Nothing to commit");
+ this.newCommitCounter = old.getCommitCounter() + 1;
- return 0L;
- }
-
+ this.commitToken = store.quorumToken;
+
+ store.assertCommitTimeAdvances(commitTime);
+
+ }
+
+ /**
+ * Notify {@link ICommitter}s to flush out application data. This sets
+ * the {@link #rootAddrs} for the {@link ICommitRecord}.
+ *
+ * @return <code>true</code> if the store is dirty and the commit should
+ * proceed and <code>false</code> otherwise.
+ */
+ private boolean notifyCommitters() {
+
/*
- * Explicitly call the RootBlockCommitter
+ * First, run each of the committers accumulating the updated root
+ * addresses in an array. In general, these are btrees and they may
+ * have dirty nodes or leaves that need to be evicted onto the
+ * store. The first time through, any newly created btrees will have
+ * dirty empty roots (the btree code does not optimize away an empty
+ * root at this time). However, subsequent commits without
+ * intervening data written on the store should not cause any
+ * committers to update their root address.
+ *
+ * Note: This also checkpoints the deferred free block list.
*/
- rootAddrs[PREV_ROOTBLOCK] = this.m_rootBlockCommitter
- .handleCommit(commitTime);
+ rootAddrs = store.notifyCommitters(commitTime);
- // Local HA service implementation (non-Remote).
- final QuorumService<HAGlue> quorumService = quorum == null ? null
- : quorum.getClient();
+ /*
+ * See if anything has been written on the store since the last
+ * commit.
+ */
+ if (!_bufferStrategy.requiresCommit(store._rootBlock)) {
- final IJoinedAndNonJoinedServices gatherJoinedAndNonJoinedServices;
- final IHANotifyReleaseTimeResponse consensusReleaseTime;
- if ((_bufferStrategy instanceof IHABufferStrategy)
- && quorum != null && quorum.isHighlyAvailable()) {
-
- /**
- * CRITICAL SECTION. We need obtain a distributed consensus for
- * the services joined with the met quorum concerning the
- * earliest commit point that is pinned by the combination of
- * the active transactions and the minReleaseAge on the TXS. New
- * transaction starts during this critical section will block
- * (on the leader or the folllower) unless they are guaranteed
- * to be allowable, e.g., based on the current minReleaseAge,
- * the new tx would read from the most recent commit point, the
- * new tx would ready from a commit point that is already pinned
- * by an active transaction on that node, etc.
+ /*
+ * Will not do commit.
*
- * Note: Lock makes this section MUTEX with awaitServiceJoin().
- *
- * @see <a href=
- * "https://docs.google.com/document/d/14FO2yJFv_7uc5N0tvYboU-H6XbLEFpvu-G8RhAzvxrk/edit?pli=1#"
- * > HA TXS Design Document </a>
- *
- * @see <a
- * href="https://sourceforge.net/apps/trac/bigdata/ticket/623"
- * > HA TXS / TXS Bottleneck </a>
+ * Note: No data was written onto the store so the commit can
+ * not achieve any useful purpose.
*/
- _gatherLock.lock();
+ return false;
- try {
-
- // Atomic decision point for GATHER re joined services.
- gatherJoinedAndNonJoinedServices = new JoinedAndNonJoinedServices(
- quorum);
-
- // Run the GATHER protocol.
- consensusReleaseTime = ((AbstractHATransactionService) getLocalTransactionManager()
- .getTransactionService())
- .updateReleaseTimeConsensus(newCommitCounter,
- commitTime,
- gatherJoinedAndNonJoinedServices.getJoinedServiceIds(),
- getHAReleaseTimeConsensusTimeout(),
- TimeUnit.MILLISECONDS);
+ }
+
+ /*
+ * Explicitly call the RootBlockCommitter
+ *
+ * Note: This logs the current root block and sets the address of
+ * that root block as a root address in the commitRecord.
+ * This is of potential use solely in disaster recovery scenarios
+ * where your root blocks are toast, but good root blocks can be
+ * found elsewhere in the file. Once you find a root block, you can
+ * get the commitRecordIndex and then find earlier root blocks using
+ * that root addr. Or you can just scan the file looking for valid
+ * root blocks and then use the most recent one that you can find.
+ */
+ rootAddrs[PREV_ROOTBLOCK] = store.m_rootBlockCommitter
+ .handleCommit(commitTime);
- } catch (Exception ex) {
+ // Will do commit.
+ return true;
- log.error(ex, ex);
-
- // Wrap and rethrow.
- throw new RuntimeException(ex);
-
- } finally {
+ }
- _gatherLock.unlock();
-
- }
-
- } else {
-
- /*
- * Not HA. Did not do GATHER.
- */
-
- gatherJoinedAndNonJoinedServices = null;
- consensusReleaseTime = null;
-
- } // if (HA) do GATHER
+ /**
+ * The new root addresses for the {@link ICommitRecord}.
+ *
+ * @see #notifyCommitters()
+ */
+ private long[] rootAddrs;
+
+ /**
+ * Write out the {@link ICommitRecord}, noting the
+ * {@link #commitRecordAddr}, add the {@link ICommitRecord} to the
+ * {@link CommitRecordIndex}. Finally, checkpoint the
+ * {@link CommitRecordIndex} setting the {@link #commitRecordIndexAddr}.
+ * <p>
+ * Note: This is also responsible for recycling the deferred frees for
+ * {@link IHistoryManager} backends.
+ */
+ private void writeCommitRecord() {
/*
* Before flushing the commitRecordIndex we need to check for
@@ -3202,23 +3233,15 @@
*/
if (_bufferStrategy instanceof IHistoryManager) {
- ((IHistoryManager) _bufferStrategy).checkDeferredFrees(this);
+ ((IHistoryManager) _bufferStrategy)
+ .checkDeferredFrees(store);
}
- /*
- * Write the commit record onto the store.
- *
- * @todo Modify to log the current root block and set the address of
- * that root block in the commitRecord. This will be of use solely
- * in disaster recovery scenarios where your root blocks are toast,
- * but good root blocks can be found elsewhere in the file.
- */
-
final ICommitRecord commitRecord = new CommitRecord(commitTime,
newCommitCounter, rootAddrs);
- final long commitRecordAddr = write(ByteBuffer
+ this.commitRecordAddr = store.write(ByteBuffer
.wrap(CommitRecordSerializer.INSTANCE
.serialize(commitRecord)));
@@ -3226,8 +3249,8 @@
* Add the commit record to an index so that we can recover
* historical states efficiently.
*/
- _commitRecordIndex.add(commitRecordAddr, commitRecord);
-
+ store._commitRecordIndex.add(commitRecordAddr, commitRecord);
+
/*
* Flush the commit record index to the store and stash the address
* of its metadata record in the root block.
@@ -3239,22 +3262,484 @@
* CommitRecordIndex before we can flush the CommitRecordIndex to
* the store.
*/
- final long commitRecordIndexAddr = _commitRecordIndex
+ commitRecordIndexAddr = store._commitRecordIndex
.writeCheckpoint();
- final long commitToken = quorumToken;
+ }
+
+ /**
+ * The address of the {@link ICommitRecord}.
+ *
+ * @see #writeCommitRecord()
+ */
+ private long commitRecordAddr;
+
+ /**
+ * The address of the {@link CommitRecordIndex} once it has been
+ * checkpointed against the backing store.
+ * <p>
+ * Note: The address of the root of the {@link CommitRecordIndex} needs
+ * to go right into the {@link IRootBlockView}. We are unable to place
+ * it into the {@link ICommitRecord} since we need to serialize the
+ * {@link ICommitRecord}, get its address, and add the entry to the
+ * {@link CommitRecordIndex} before we can flush the
+ * {@link CommitRecordIndex} to the store.
+ *
+ * @see #writeCommitRecord()
+ */
+ private long commitRecordIndexAddr;
+
+ /**
+ * Call commit on {@link IBufferStrategy} prior to creating the new
+ * {@link IRootBlockView}. This will flush the {@link WriteCacheService}
+ * . For HA, that ensures that the write set has been replicated to the
+ * followers.
+ * <p>
+ * Note: required for {@link RWStore} since the metaBits allocations are
+ * not made until commit, leading to invalid addresses for recent store
+ * allocations.
+ * <p>
+ * Note: After this, we do not write anything on the backing store other
+ * than the root block. The rest of this code is dedicated to creating a
+ * properly formed root block. For a non-HA deployment, we just lay down
+ * the root block. For an HA deployment, we do a 2-phase commit.
+ * <p>
+ * Note: In HA, the followers lay down the replicated writes
+ * synchronously. Thus, they are guaranteed to be on local storage by
+ * the time the leader finishes WriteCacheService.flush(). This does not
+ * create much latency because the WriteCacheService drains the
+ * dirtyList in a separate thread.
+ */
+ private void flushWriteSet() {
+
+ _bufferStrategy.commit();
+
+ }
+
+ /**
+ * Create the new root block.
+ */
+ private void newRootBlock() {
+
+ /*
+ * The next offset at which user data would be written. Calculated,
+ * after commit!
+ */
+ final long nextOffset = _bufferStrategy.getNextOffset();
+
+ final long blockSequence;
+ if (_bufferStrategy instanceof IHABufferStrategy) {
+
+ ...
[truncated message content] |
|
From: <tho...@us...> - 2013-10-31 17:04:42
|
Revision: 7505
http://bigdata.svn.sourceforge.net/bigdata/?rev=7505&view=rev
Author: thompsonbry
Date: 2013-10-31 17:04:33 +0000 (Thu, 31 Oct 2013)
Log Message:
-----------
The issue with the withheld version of QuorumCommitImpl has been tracked down to the non-blocking semantics of HAGlue.getRootBlock(). I have introduced a blocking version of the IHARootBlockRequest message for this method. When that is used from within awaitCommitCounter() in the test suite, the test suite runs green for the tests that were failing. Thus, this was a data race problem in the test suite.
AbstractServer: clean up dead code.
HAJournalServer: clean up dead code.
AbstractHA3BackupTestCase: clean up import.
AbstractHAJournalServerTestCase: modify awaitCommitCounter() to use the blocking version of HAGlue.getRootBlock(). This fixes a test suite problem where increased concurrency in the 2-phase PREPARE, COMMIT, and ABORT protocols could cause test suite failures.
QuorumCommitImpl: Increased parallelism. The PREPARE, COMMIT, and ABORT behaviors are now executed in parallel on the followers and the leader, and the code patterns for error handling were simplified (a sketch of this parallel submission pattern follows this message).
(I)HARootBlockRequest: Added boolean parameter for blocking versus non-blocking request.
AbstractJournal: Added support for blocking versus non-blocking request.
See #760 (commit2Phase code review).
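As a rough illustration of the increased parallelism noted above for QuorumCommitImpl, here is a sketch of submitting the per-service PREPARE requests as local tasks on an executor and cancelling any stragglers. The interface and class names are hypothetical; the real code additionally involves HACommitGlue proxies, ThickFuture, timeouts, and leader-local execution, and the blocking variant of IHARootBlockRequest is just a boolean on that request message (not shown here):
{{{
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

// Illustrative sketch only; not the actual QuorumCommitImpl.
public class ParallelPrepareSketch {

    /** Hypothetical stand-in for the RMI to one service's prepare2Phase(). */
    public interface PrepareCall {
        boolean prepare() throws Exception;
    }

    private final ExecutorService executorService;

    public ParallelPrepareSketch(final ExecutorService executorService) {
        this.executorService = executorService;
    }

    /**
     * Submit one *local* task per service so the remote requests run in
     * parallel, then count the YES votes. Cancelling a local Future
     * interrupts whatever request that task has in flight.
     */
    public int prepareAll(final List<PrepareCall> services)
            throws InterruptedException {
        final List<Future<Boolean>> localFutures =
                new ArrayList<Future<Boolean>>(services.size());
        try {
            for (final PrepareCall svc : services) {
                localFutures.add(executorService.submit(new Callable<Boolean>() {
                    public Boolean call() throws Exception {
                        return svc.prepare(); // remote request runs inside the task
                    }
                }));
            }
            int nyes = 0;
            for (final Future<Boolean> f : localFutures) {
                try {
                    if (f.get()) // the real code would use get(timeout, unit)
                        nyes++;
                } catch (ExecutionException ex) {
                    // A failed or rejected PREPARE is counted as a NO vote.
                }
            }
            return nyes;
        } finally {
            // Best-effort cancellation of anything still outstanding.
            for (final Future<Boolean> f : localFutures) {
                if (f != null && !f.isDone())
                    f.cancel(true/* mayInterruptIfRunning */);
            }
        }
    }
}
}}}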
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/msg/HARootBlockRequest.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/msg/IHARootBlockRequest.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java 2013-10-31 14:53:27 UTC (rev 7504)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java 2013-10-31 17:04:33 UTC (rev 7505)
@@ -28,10 +28,11 @@
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
+import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -43,11 +44,14 @@
import com.bigdata.ha.msg.IHA2PhaseAbortMessage;
import com.bigdata.ha.msg.IHA2PhaseCommitMessage;
import com.bigdata.ha.msg.IHA2PhasePrepareMessage;
+import com.bigdata.ha.msg.IHAMessage;
import com.bigdata.journal.IRootBlockView;
import com.bigdata.quorum.Quorum;
import com.bigdata.quorum.QuorumMember;
import com.bigdata.quorum.QuorumStateChangeListener;
import com.bigdata.quorum.QuorumStateChangeListenerBase;
+import com.bigdata.service.proxy.ThickFuture;
+import com.bigdata.util.InnerCause;
import com.bigdata.util.concurrent.ExecutionExceptions;
/**
@@ -60,50 +64,73 @@
static private transient final Logger log = Logger
.getLogger(QuorumCommitImpl.class);
- protected final QuorumMember<S> member;
-
- /**
- * The downstream service in the write pipeline.
- */
- protected volatile UUID downStreamId = null;
+ private final QuorumMember<S> member;
+ private final ExecutorService executorService;
public QuorumCommitImpl(final QuorumMember<S> member) {
+
+ if (member == null)
+ throw new IllegalArgumentException();
this.member = member;
+ this.executorService = member.getExecutor();
+
}
- protected Quorum<?, ?> getQuorum() {
+ private Quorum<?, ?> getQuorum() {
return member.getQuorum();
}
- protected HACommitGlue getService(final UUID serviceId) {
+ private HACommitGlue getService(final UUID serviceId) {
return member.getService(serviceId);
}
-
+
/**
* Cancel the requests on the remote services (RMI). This is a best effort
* implementation. Any RMI related errors are trapped and ignored in order
* to be robust to failures in RMI when we try to cancel the futures.
+ * <p>
+ * Note: This is not being done in parallel. However, due to a DGC thread
+ * leak issue, we now use {@link ThickFuture}s. Thus, the tasks that are
+ * being cancelled are all local tasks running on the
+ * {@link #executorService}. If that local task is doing an RMI, then
+ * cancelling it will cause an interrupt in the NIO request.
*/
- protected <F extends Future<T>, T> void cancelRemoteFutures(
- final List<F> remoteFutures) {
+ private <F extends Future<T>, T> void cancelFutures(final List<F> futures) {
if (log.isInfoEnabled())
log.info("");
- for (F rf : remoteFutures) {
+ for (F f : futures) {
+ if (f == null) {
+
+ continue;
+
+ }
+
try {
+
+ if (!f.isDone()) {
+
+ f.cancel(true/* mayInterruptIfRunning */);
- rf.cancel(true/* mayInterruptIfRunning */);
+ }
} catch (Throwable t) {
+
+ if (InnerCause.isInnerCause(t, InterruptedException.class)) {
+ // Propagate interrupt.
+ Thread.currentThread().interrupt();
+
+ }
+
// ignored (to be robust).
}
@@ -139,6 +166,7 @@
* from the prepare message. This metadata is used to decide how the service
* will handle the prepare, commit, and abort messages.
*/
+ @Override
public PrepareResponse prepare2Phase(final PrepareRequest req)
throws InterruptedException, IOException {
@@ -150,9 +178,6 @@
final UUID[] joinedServiceIds = req.getPrepareAndNonJoinedServices()
.getJoinedServiceIds();
-// final Set<UUID> nonJoinedPipelineServiceIds = req
-// .getNonJoinedPipelineServiceIds();
-
final long timeout = req.getTimeout();
final TimeUnit unit = req.getUnit();
@@ -172,35 +197,36 @@
final long begin = System.nanoTime();
final long nanos = unit.toNanos(timeout);
long remaining = nanos;
-
+
/*
- * The leader is a local service. The followers and other service in the
- * pipeline (but not yet joined) are remote services.
+ * Random access list of futures.
+ *
+ * Note: These are *local* Futures. Except for the leader, the Future is
+ * for a task that submits an RMI request. This allows us to run the
+ * PREPARE in parallel for less total latency. On the leader, the task
+ * is run in the caller's thread: the caller holds a lock that we need,
+ * so the behavior has to execute in that thread rather than on the
+ * executor.
+ *
+ * Note: If a follower was joined as of the atomic decision point, but
+ * did not *participate* in the GATHER protocol, then we still send it
+ * the PREPARE message.
*/
+ final ArrayList<Future<Boolean>> localFutures = new ArrayList<Future<Boolean>>(
+ joinedServiceIds.length);
- // #of remote followers (joined services, excluding the leader).
- final int nfollowers = (joinedServiceIds.length - 1);
+ try {
-// // #of non-joined services in the pipeline.
-// final int nNonJoinedPipelineServices = nonJoinedPipelineServiceIds
-// .size();
+ // #of remote followers (joined services, excluding the leader).
+ final int nfollowers = joinedServiceIds.length - 1;
- // #of remote services (followers plus others in the pipeline).
- final int remoteServiceCount = nfollowers;// + nNonJoinedPipelineServices;
+ for (int i = 0; i <= nfollowers; i++) {
- // Random access list of futures.
- final ArrayList<Future<Boolean>> remoteFutures = new ArrayList<Future<Boolean>>(
- remoteServiceCount);
+ // Pre-size to ensure sufficient room for set(i,foo).
+ localFutures.add(null);
- for (int i = 0; i <= remoteServiceCount; i++) {
-
- // Pre-size to ensure sufficient room for set(i,foo).
- remoteFutures.add(null);
-
- }
-
- try {
-
+ }
+
// Verify the quorum is valid.
member.assertLeader(token);
@@ -240,64 +266,20 @@
rootBlock, timeout, unit);
/*
- * Runnable which will execute this message on the
- * remote service.
- *
- * FIXME Because async futures cause DGC native thread
- * leaks this is no longer running the prepare
- * asynchronously on the followers. Change the code
- * here, and in commit2Phase and abort2Phase to use
- * multiple threads to run the tasks on the followers.
+ * Submit task which will execute this message on the
+ * remote service. We will await this task below.
*/
-
- final HACommitGlue service = getService(serviceId);
-
- Future<Boolean> rf = null;
- try {
- // RMI.
- rf = service.prepare2Phase(msgForJoinedService);
- } catch (final Throwable t) {
- // If anything goes wrong, wrap up exception as Future.
- final FutureTask<Boolean> ft = new FutureTask<Boolean>(new Runnable() {
- public void run() {
- throw new RuntimeException(t);
- }
- }, Boolean.FALSE);
- rf = ft;
- ft.run(); // evaluate future.
- }
+ final Future<Boolean> rf = executorService
+ .submit(new PrepareMessageTask(serviceId,
+ msgForJoinedService));
// add to list of futures we will check.
- remoteFutures.set(i, rf);
+ localFutures.set(i, rf);
}
}
-// // Next, message the pipeline services NOT met with the quorum.
-// {
-//
-// // message for non-joined services.
-// final IHA2PhasePrepareMessage msg = new HA2PhasePrepareMessage(
-// false/* isJoinedService */, rootBlock, timeout, unit);
-//
-// for (UUID serviceId : nonJoinedPipelineServiceIds) {
-//
-// /*
-// * Runnable which will execute this message on the
-// * remote service.
-// */
-// final Future<Boolean> rf = getService(serviceId)
-// .prepare2Phase(msg);
-//
-// // add to list of futures we will check.
-// remoteFutures.set(i, rf);
-//
-// i++;
-//
-// }
-// }
-
/*
* Finally, run the operation on the leader using local method
* call (non-RMI) in the caller's thread to avoid deadlock.
@@ -324,7 +306,7 @@
final Future<Boolean> f = leader
.prepare2Phase(msgForJoinedService);
- remoteFutures.set(0/* index */, f);
+ localFutures.set(0/* index */, f);
}
@@ -334,36 +316,24 @@
* Check futures for all services that were messaged.
*/
int nyes = 0;
- assert remoteFutures.size() == remoteServiceCount + 1;
- final boolean[] votes = new boolean[remoteServiceCount + 1];
- for (int i = 0; i <= remoteServiceCount; i++) {
- final Future<Boolean> rf = remoteFutures.get(i);
- if (rf == null)
+ final boolean[] votes = new boolean[1 + nfollowers];
+ for (int i = 0; i <= nfollowers; i++) {
+ final Future<Boolean> ft = localFutures.get(i);
+ if (ft == null)
throw new AssertionError("null @ index=" + i);
- boolean done = false;
try {
remaining = nanos - (System.nanoTime() - begin);
- final boolean vote = rf
+ final boolean vote = ft
.get(remaining, TimeUnit.NANOSECONDS);
votes[i] = vote;
- if (i < joinedServiceIds.length) {
- // Only the leader and the followers get a vote.
- nyes += vote ? 1 : 0;
- } else {
- // non-joined pipeline service. vote does not count.
- if (!vote) {
- log.warn("Non-joined pipeline service will not prepare");
- }
- }
- done = true;
+ // Only the leader and the followers get a vote.
+ nyes += vote ? 1 : 0;
} catch (CancellationException ex) {
// This Future was cancelled.
log.error(ex, ex);
- done = true; // CancellationException indicates isDone().
} catch (TimeoutException ex) {
// Timeout on this Future.
log.error(ex, ex);
- done = false;
} catch (ExecutionException ex) {
/*
* Note: prepare2Phase() is throwing exceptions if
@@ -373,7 +343,6 @@
* service when attempting to perform the RMI.
*/
log.error(ex, ex);
- done = true; // ExecutionException indicates isDone().
} catch (RuntimeException ex) {
/*
* Note: ClientFuture.get() can throw a RuntimeException if
@@ -382,14 +351,8 @@
*/
log.error(ex, ex);
} finally {
- if (!done) {
- // Cancel the request on the remote service (RMI).
- try {
- rf.cancel(true/* mayInterruptIfRunning */);
- } catch (Throwable t) {
- // ignored.
- }
- }
+ // Note: cancelling a *local* Future wrapping an RMI.
+ ft.cancel(true/*mayInterruptIfRunning*/);
}
}
@@ -415,30 +378,19 @@
return new PrepareResponse(k, nyes, willCommit, votes);
} finally {
- /*
- * Ensure that all futures are cancelled.
- */
- for (Future<Boolean> rf : remoteFutures) {
- if (rf == null) // ignore empty slots.
- continue;
- if (!rf.isDone()) {
- // Cancel the request on the remote service (RMI).
- try {
- rf.cancel(true/* mayInterruptIfRunning */);
- } catch (Throwable t) {
- // ignored.
- }
- }
- }
+
+ cancelFutures(localFutures);
+
}
}
- public void commit2Phase(final CommitRequest req) throws IOException,
+ @Override
+ public void commit2Phase(final CommitRequest commitRequest) throws IOException,
InterruptedException {
if (log.isInfoEnabled())
- log.info("req=" + req);
+ log.info("req=" + commitRequest);
/*
* To minimize latency, we first submit the futures for the other
@@ -460,31 +412,28 @@
* atomic decision point concerning such things in commitNow().]
*/
- final PrepareRequest preq = req.getPrepareRequest();
+ final PrepareRequest prepareRequest = commitRequest.getPrepareRequest();
- final UUID[] joinedServiceIds = preq.getPrepareAndNonJoinedServices()
+ final UUID[] joinedServiceIds = prepareRequest.getPrepareAndNonJoinedServices()
.getJoinedServiceIds();
-// final Set<UUID> nonJoinedPipelineServiceIds = preq
-// .getNonJoinedPipelineServiceIds();
-
- final long token = preq.getRootBlock().getQuorumToken();
+ final long token = prepareRequest.getRootBlock().getQuorumToken();
- final long commitTime = preq.getRootBlock().getLastCommitTime();
+ final long commitTime = prepareRequest.getRootBlock().getLastCommitTime();
- final PrepareResponse presp = req.getPrepareResponse();
+ final PrepareResponse prepareResponse = commitRequest.getPrepareResponse();
// true iff we have a full complement of services that vote YES for this
// commit.
- final boolean didAllServicesPrepare = presp.getYesCount() == presp
+ final boolean didAllServicesPrepare = prepareResponse.getYesCount() == prepareResponse
.replicationFactor();
- member.assertLeader(token);
-
- final List<Future<Void>> remoteFutures = new LinkedList<Future<Void>>();
+ final List<Future<Void>> localFutures = new LinkedList<Future<Void>>();
try {
+ member.assertLeader(token);
+
final IHA2PhaseCommitMessage msgJoinedService = new HA2PhaseCommitMessage(
true/* isJoinedService */, commitTime, didAllServicesPrepare);
@@ -492,7 +441,7 @@
final UUID serviceId = joinedServiceIds[i];
- if (!presp.getVote(i)) {
+ if (!prepareResponse.getVote(i)) {
// Skip services that did not vote YES in PREPARE.
continue;
@@ -500,38 +449,18 @@
}
/*
- * Runnable which will execute this message on the remote
- * service.
+ * Submit task on local executor. The task will do an RMI to the
+ * remote service.
*/
- final Future<Void> rf = getService(serviceId).commit2Phase(
- msgJoinedService);
+ final Future<Void> rf = executorService
+ .submit(new CommitMessageTask(serviceId,
+ msgJoinedService));
// add to list of futures we will check.
- remoteFutures.add(rf);
+ localFutures.add(rf);
}
-// if (!nonJoinedPipelineServiceIds.isEmpty()) {
-//
-// final IHA2PhaseCommitMessage msgNonJoinedService = new HA2PhaseCommitMessage(
-// false/* isJoinedService */, commitTime);
-//
-// for (UUID serviceId : nonJoinedPipelineServiceIds) {
-//
-// /*
-// * Runnable which will execute this message on the remote
-// * service.
-// */
-// final Future<Void> rf = getService(serviceId).commit2Phase(
-// msgNonJoinedService);
-//
-// // add to list of futures we will check.
-// remoteFutures.add(rf);
-//
-// }
-//
-// }
-
{
/*
* Run the operation on the leader using local method call in
@@ -542,7 +471,7 @@
final Future<Void> f = leader.commit2Phase(msgJoinedService);
- remoteFutures.add(f);
+ localFutures.add(f);
}
@@ -550,11 +479,9 @@
* Check the futures for the other services in the quorum.
*/
final List<Throwable> causes = new LinkedList<Throwable>();
- for (Future<Void> rf : remoteFutures) {
- boolean done = false;
+ for (Future<Void> ft : localFutures) {
try {
- rf.get(); // TODO Timeout to await followers in commit2Phase().
- done = true;
+ ft.get(); // FIXME Timeout to await followers in commit2Phase().
// } catch (TimeoutException ex) {
// // Timeout on this Future.
// log.error(ex, ex);
@@ -564,11 +491,9 @@
// Future was cancelled.
log.error(ex, ex);
causes.add(ex);
- done = true; // Future is done since cancelled.
} catch (ExecutionException ex) {
log.error(ex, ex);
causes.add(ex);
- done = true; // Note: ExecutionException indicates isDone().
} catch (RuntimeException ex) {
/*
* Note: ClientFuture.get() can throw a RuntimeException
@@ -578,14 +503,8 @@
log.error(ex, ex);
causes.add(ex);
} finally {
- if (!done) {
- // Cancel the request on the remote service (RMI).
- try {
- rf.cancel(true/* mayInterruptIfRunning */);
- } catch (Throwable t) {
- // ignored.
- }
- }
+ // Note: cancelling a *local* Future wrapping an RMI.
+ ft.cancel(true/* mayInterruptIfRunning */);
}
}
@@ -593,8 +512,6 @@
* If there were any errors, then throw an exception listing them.
*/
if (!causes.isEmpty()) {
- // Cancel remote futures.
- cancelRemoteFutures(remoteFutures);
// Throw exception back to the leader.
if (causes.size() == 1)
throw new RuntimeException(causes.get(0));
@@ -603,28 +520,22 @@
}
} finally {
- /*
- * Ensure that all futures are cancelled.
- */
- for (Future<Void> rf : remoteFutures) {
- if (!rf.isDone()) {
- // Cancel the request on the remote service (RMI).
- try {
- rf.cancel(true/* mayInterruptIfRunning */);
- } catch (Throwable t) {
- // ignored.
- }
- }
- }
+
+ // Ensure that all futures are cancelled.
+ cancelFutures(localFutures);
+
}
}
/**
+ * {@inheritDoc}
+ *
* FIXME Only issue abort to services that voted YES in prepare? [We have
* that information in commitNow(), but we do not have the atomic set of
* joined services in AbstractJournal.abort())].
*/
+ @Override
public void abort2Phase(final long token) throws IOException,
InterruptedException {
@@ -632,14 +543,6 @@
log.info("token=" + token);
/*
- * To minimize latency, we first submit the futures for the other
- * services and then do f.run() on the leader. This will allow the other
- * services to commit concurrently with the leader's IO.
- */
-
- final List<Future<Void>> remoteFutures = new LinkedList<Future<Void>>();
-
- /*
* For services (other than the leader) in the quorum, submit the
* RunnableFutures to an Executor.
*/
@@ -649,6 +552,12 @@
final IHA2PhaseAbortMessage msg = new HA2PhaseAbortMessage(token);
+ /*
+ * To minimize latency, we first submit the futures for the other
+ * services and then do f.run() on the leader.
+ */
+ final List<Future<Void>> localFutures = new LinkedList<Future<Void>>();
+
try {
for (int i = 1; i < joinedServiceIds.length; i++) {
@@ -656,13 +565,14 @@
final UUID serviceId = joinedServiceIds[i];
/*
- * Runnable which will execute this message on the remote
- * service.
+ * Submit task on local executor. The task will do an RMI to the
+ * remote service.
*/
- final Future<Void> rf = getService(serviceId).abort2Phase(msg);
+ final Future<Void> rf = executorService
+ .submit(new AbortMessageTask(serviceId, msg));
// add to list of futures we will check.
- remoteFutures.add(rf);
+ localFutures.add(rf);
}
@@ -674,25 +584,22 @@
member.assertLeader(token);
final S leader = member.getService();
final Future<Void> f = leader.abort2Phase(msg);
- remoteFutures.add(f);
+ localFutures.add(f);
}
/*
* Check the futures for the other services in the quorum.
*/
final List<Throwable> causes = new LinkedList<Throwable>();
- for (Future<Void> rf : remoteFutures) {
- boolean done = false;
+ for (Future<Void> ft : localFutures) {
try {
- rf.get();
- done = true;
+ ft.get(); // TODO Timeout for abort?
} catch (InterruptedException ex) {
log.error(ex, ex);
causes.add(ex);
} catch (ExecutionException ex) {
log.error(ex, ex);
causes.add(ex);
- done = true; // Note: ExecutionException indicates isDone().
} catch (RuntimeException ex) {
/*
* Note: ClientFuture.get() can throw a RuntimeException
@@ -702,14 +609,8 @@
log.error(ex, ex);
causes.add(ex);
} finally {
- if (!done) {
- // Cancel the request on the remote service (RMI).
- try {
- rf.cancel(true/* mayInterruptIfRunning */);
- } catch (Throwable t) {
- // ignored.
- }
- }
+ // Note: cancelling a *local* Future wrapping an RMI.
+ ft.cancel(true/* mayInterruptIfRunning */);
}
}
@@ -717,11 +618,10 @@
* If there were any errors, then throw an exception listing them.
*
* TODO But only throw an exception for the joined services.
- * Non-joined services, we just long an error.
+ * For non-joined services, we just log an error (or simply do not tell
+ * them to do an abort()).
*/
if (!causes.isEmpty()) {
- // Cancel remote futures.
- cancelRemoteFutures(remoteFutures);
// Throw exception back to the leader.
if (causes.size() == 1)
throw new RuntimeException(causes.get(0));
@@ -730,22 +630,133 @@
}
} finally {
+
+ // Ensure that all futures are cancelled.
+ cancelFutures(localFutures);
+
+ }
+
+ }
+
+ /**
+ * Helper class submits the RMI for a PREPARE, COMMIT, or ABORT message.
+ * This is used to execute the different requests in parallel on a local
+ * executor service.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan
+ * Thompson</a>
+ */
+ private abstract class AbstractMessageTask<T, M extends IHAMessage>
+ implements Callable<T> {
+
+ private final UUID serviceId;
+ protected final M msg;
+
+ public AbstractMessageTask(final UUID serviceId, final M msg) {
+
+ this.serviceId = serviceId;
+
+ this.msg = msg;
+
+ }
+
+ @Override
+ final public T call() throws Exception {
+
/*
- * Ensure that all futures are cancelled.
+ * Note: This code MAY be interrupted at any point if the Future for
+ * the task is cancelled. If it is interrupted during the RMI, then
+ * the expectation is that the N...
[truncated message content] |
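(The rest of this message body is truncated above. For orientation only, here is a hypothetical sketch of what one concrete message task might look like under the pattern described in the javadoc; it assumes the enclosing QuorumCommitImpl context — getService(), HACommitGlue, IHA2PhasePrepareMessage — and is not the committed code:)

    // Hypothetical sketch only; the committed call() body is truncated above.
    private class PrepareMessageTaskSketch implements Callable<Boolean> {

        private final UUID serviceId;
        private final IHA2PhasePrepareMessage msg;

        PrepareMessageTaskSketch(final UUID serviceId,
                final IHA2PhasePrepareMessage msg) {
            this.serviceId = serviceId;
            this.msg = msg;
        }

        @Override
        public Boolean call() throws Exception {
            // Resolve the service proxy and do the RMI. If this task's local
            // Future is cancelled, the interrupt propagates into the NIO
            // request.
            final HACommitGlue service = getService(serviceId);
            return service.prepare2Phase(msg).get();
        }
    }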
|
From: <tho...@us...> - 2013-11-01 13:34:18
|
Revision: 7506
http://bigdata.svn.sourceforge.net/bigdata/?rev=7506&view=rev
Author: thompsonbry
Date: 2013-11-01 13:34:10 +0000 (Fri, 01 Nov 2013)
Log Message:
-----------
javadoc related to QuorumServer.getService(UUID) as implemented by HAQuorumService.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-10-31 17:04:33 UTC (rev 7505)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-11-01 13:34:10 UTC (rev 7506)
@@ -8001,15 +8001,17 @@
final UUID leaderId = req.getLeaderId();
+ // Note: Will throw exception if our HAQuorumService is not running.
final HAGlue leader = getQuorum().getClient().getService(leaderId);
if (leader == null)
throw new RuntimeException(
"Could not discover the quorum leader.");
+ // Get our serviceId.
final UUID serviceId = getServiceId();
- if(serviceId == null)
+ if (serviceId == null)
throw new AssertionError();
final Callable<IHANotifyReleaseTimeResponse> task = ((AbstractHATransactionService) AbstractJournal.this
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-10-31 17:04:33 UTC (rev 7505)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-11-01 13:34:10 UTC (rev 7506)
@@ -1689,11 +1689,29 @@
}
/**
- * Resolve an {@link HAGlue} object from its Service UUID.
+ * {@inheritDoc}
+ * <p>
+ * This implementation resolves an {@link HAGlue} object from its
+ * Service UUID using the <strong>pre-existing</strong> connection for
+ * the {@link HAClient} and the cached service discovery lookup for that
+ * connection. If the {@link HAClient} is not connected, then an
+ * {@link IllegalStateException} will be thrown.
+ *
+ * @param serviceId
+ * The {@link UUID} of the service to be resolved.
+ *
+ * @return The proxy for the service having the specified {@link UUID}
+ * and never <code>null</code>.
+ *
+ * @throws IllegalStateException
+ * if the {@link HAClient} is not connected.
+ * @throws QuorumException
+ * if no service can be discovered for that {@link UUID}.
*/
@Override
public S getService(final UUID serviceId) {
+ // Throws IllegalStateException if not connected (HAClient).
final HAGlueServicesClient discoveryClient = server
.getHAClient().getConnection().getHAGlueServicesClient();
|
|
From: <tho...@us...> - 2013-11-01 21:10:53
|
Revision: 7507
http://bigdata.svn.sourceforge.net/bigdata/?rev=7507&view=rev
Author: thompsonbry
Date: 2013-11-01 21:10:45 +0000 (Fri, 01 Nov 2013)
Log Message:
-----------
Continued work on #760 (2-phase commit semantics review)
I have analyzed the root cause of the override test in which B is told to reject the commit. The test relies on an exception being thrown back to the leader when the commit2Phase message is received, so B never actually encounters an error while handling that message and therefore is not transitioned automatically to the ERROR state.
I have further cleaned up the 2-phase commit logic in both QuorumCommitImpl and AbstractJournal. The 2-phase COMMIT is now rejected IFF we lack a majority of services that successfully executed the COMMIT message (and for which the leader has received notice of that success). The leader will fail over if the COMMIT phase fails, and the transaction will be failed. (If the PREPARE phase fails, the transaction is failed, but the leader does not fail over.)
I am now encountering a problem with this test where B goes into the Operator state because it is looking for an HALog that does not exist. I had observed this in the recent longevity tests. Now we have a unit test that can recreate the problem, which is great.
http://192.168.1.133:8091 : is not joined, pipelineOrder=2, writePipelineAddr=localhost/127.0.0.1:9091, service=other, extendedRunState={server=Running, quorumService=Operator @ 1, haReady=-1, haStatus=NotReady, serviceId=e832b5c0-6c9b-444d-8946-514eacc5e329, now=1383332021358, msg=[HALog not available: commitCounter=2]}
QuorumCommitImpl was modified to return a class summarizing the commit response. This makes it possible to write more flexible behaviors in AbstractJournal.commitNow().
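Condensed, the leader-side decision described above reduces to a quorum-majority test over the per-service COMMIT outcomes. A sketch of the commitNow() change (quorum, quorumService, prepareRequest, and prepareResponse are assumed from the surrounding AbstractJournal code; see the full diff below):

    final CommitResponse resp = quorumService.commit2Phase(
            new CommitRequest(prepareRequest, prepareResponse));

    if (!quorum.isQuorum(resp.getNOk())) {
        /*
         * Too few services executed the COMMIT successfully (or the leader
         * never learned of their success). Fail the commit; throwCauses()
         * is guaranteed to not return normally.
         */
        resp.throwCauses();
    }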
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommit.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/CommitResponse.java
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/CommitResponse.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/CommitResponse.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/CommitResponse.java 2013-11-01 21:10:45 UTC (rev 7507)
@@ -0,0 +1,150 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on Nov 1, 2013
+ */
+package com.bigdata.ha;
+
+import java.util.ArrayList;
+
+import com.bigdata.util.concurrent.ExecutionExceptions;
+
+/**
+ * Response for a 2-phase commit.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class CommitResponse {
+
+ /**
+ * The COMMIT message.
+ */
+ private final CommitRequest req;
+
+ /**
+ * An array of the root cause exceptions for any errors encountered when
+ * instructing the services to execute the COMMIT message. The indices into
+ * this collection are correlated with the service join order and the
+ * PREPARE vote order. The leader is always at index zero.
+ */
+ private final ArrayList<Throwable> causes;
+
+ /**
+ * The number of COMMIT messages that are known to have been processed
+ * successfully.
+ */
+ private final int nok;
+ /**
+ * The number of COMMIT messages that were issued and which failed.
+ */
+ private final int nfail;
+
+ public CommitResponse(final CommitRequest req,
+ final ArrayList<Throwable> causes) {
+
+ this.req = req;
+ this.causes = causes;
+
+ int nok = 0, nfail = 0;
+
+ for (Throwable t : causes) {
+
+ if (t == null)
+ nok++; // request issued and was Ok.
+ else
+ nfail++; // request issued and failed.
+
+ }
+
+ this.nok = nok;
+ this.nfail = nfail;
+
+ }
+
+ public boolean isLeaderOk() {
+
+ return causes.get(0) == null;
+
+ }
+
+ /**
+ * Number of COMMIT messages that were generated and succeeded.
+ */
+ public int getNOk() {
+
+ return nok;
+
+ }
+
+ /**
+ * Number of COMMIT messages that were generated and failed.
+ */
+ public int getNFail() {
+
+ return nfail;
+
+ }
+
+ /**
+ * Return the root cause for the ith service -or- <code>null</code> if the
+ * COMMIT did not produce an exception for that service.
+ */
+ public Throwable getCause(final int i) {
+
+ return causes.get(i);
+
+ }
+
+ /**
+ * Throw out the exception(s).
+ * <p>
+ * Note: This method is guaranteed to not return normally!
+ *
+ * @throws Exception
+ * if one or more services that voted YES failed the COMMIT.
+ *
+ * @throws IllegalStateException
+ * if all services that voted YES succeeded.
+ */
+ public void throwCauses() throws Exception {
+
+ if (causes.isEmpty()) {
+
+ // There were no errors.
+ throw new IllegalStateException();
+
+ }
+
+ // Throw exception back to the leader.
+ if (causes.size() == 1)
+ throw new Exception(causes.get(0));
+
+ final int k = req.getPrepareResponse().replicationFactor();
+
+ throw new Exception("replicationFactor=" + k + ", nok=" + nok
+ + ", nfail=" + nfail, new ExecutionExceptions(causes));
+
+ }
+
+}
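As a quick usage sketch for this class (the causes list below is fabricated for illustration; indices follow the service join order with the leader at index 0, and commitRequest is assumed from the surrounding context):

    final ArrayList<Throwable> causes = new ArrayList<Throwable>();
    causes.add(null);                                // leader: COMMIT ok
    causes.add(null);                                // follower 1: COMMIT ok
    causes.add(new RuntimeException("RMI failed"));  // follower 2: failed

    final CommitResponse resp = new CommitResponse(commitRequest, causes);
    // resp.isLeaderOk() == true, resp.getNOk() == 2, resp.getNFail() == 1.
    if (resp.getNFail() > 0 /* and nok is below a quorum majority */) {
        resp.throwCauses(); // throws, wrapping the root cause(s).
    }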
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommit.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommit.java 2013-11-01 13:34:10 UTC (rev 7506)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommit.java 2013-11-01 21:10:45 UTC (rev 7507)
@@ -28,10 +28,8 @@
package com.bigdata.ha;
import java.io.IOException;
-import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import com.bigdata.journal.IRootBlockView;
import com.bigdata.quorum.Quorum;
/**
@@ -64,12 +62,13 @@
/**
* Used by the leader to send a message to each joined service in the quorum
* telling it to commit using the root block from the corresponding
- * {@link #prepare2Phase(IRootBlockView, long, TimeUnit) prepare} message.
- * The commit MAY NOT go forward unless both the current quorum token and
- * the lastCommitTime on this message agree with the quorum token and
+ * {@link #prepare2Phase(PrepareRequest) prepare} message. The commit MAY
+ * NOT go forward unless both the current quorum token and the
+ * lastCommitTime on this message agree with the quorum token and
* lastCommitTime in the root block from the last "prepare" message.
*/
- void commit2Phase(CommitRequest req) throws IOException, InterruptedException;
+ CommitResponse commit2Phase(CommitRequest req) throws IOException,
+ InterruptedException;
/**
* Used by the leader to send a message to each service joined with the
@@ -80,6 +79,6 @@
* @param token
* The quorum token.
*/
- void abort2Phase(final long token) throws IOException, InterruptedException;
+ void abort2Phase(long token) throws IOException, InterruptedException;
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java 2013-11-01 13:34:10 UTC (rev 7506)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java 2013-11-01 21:10:45 UTC (rev 7507)
@@ -386,8 +386,8 @@
}
@Override
- public void commit2Phase(final CommitRequest commitRequest) throws IOException,
- InterruptedException {
+ public CommitResponse commit2Phase(final CommitRequest commitRequest)
+ throws IOException, InterruptedException {
if (log.isInfoEnabled())
log.info("req=" + commitRequest);
@@ -428,10 +428,26 @@
final boolean didAllServicesPrepare = prepareResponse.getYesCount() == prepareResponse
.replicationFactor();
- final List<Future<Void>> localFutures = new LinkedList<Future<Void>>();
-
+ /*
+ * Note: These entries are in service join order. The first entry is
+ * always the leader. If a services did not vote YES for the PREPARE
+ * then it will have a null entry in this list.
+ */
+ final ArrayList<Future<Void>> localFutures = new ArrayList<Future<Void>>(
+ joinedServiceIds.length);
+
+ final ArrayList<Throwable> causes = new ArrayList<Throwable>();
+
try {
+ for (int i = 0; i < joinedServiceIds.length; i++) {
+
+ // Pre-size to ensure sufficient room for set(i,foo).
+ localFutures.add(null);
+ causes.add(null);
+
+ }
+
member.assertLeader(token);
final IHA2PhaseCommitMessage msgJoinedService = new HA2PhaseCommitMessage(
@@ -439,8 +455,6 @@
for (int i = 1; i < joinedServiceIds.length; i++) {
- final UUID serviceId = joinedServiceIds[i];
-
if (!prepareResponse.getVote(i)) {
// Skip services that did not vote YES in PREPARE.
@@ -448,6 +462,8 @@
}
+ final UUID serviceId = joinedServiceIds[i];
+
/*
* Submit task on local executor. The task will do an RMI to the
* remote service.
@@ -457,7 +473,7 @@
msgJoinedService));
// add to list of futures we will check.
- localFutures.add(rf);
+ localFutures.set(i, rf);
}
@@ -471,37 +487,38 @@
final Future<Void> f = leader.commit2Phase(msgJoinedService);
- localFutures.add(f);
+ localFutures.set(0/* leader */, f);
}
/*
* Check the futures for the other services in the quorum.
*/
- final List<Throwable> causes = new LinkedList<Throwable>();
- for (Future<Void> ft : localFutures) {
+ for (int i = 0; i < joinedServiceIds.length; i++) {
+ final Future<Void> ft = localFutures.get(i);
+ if (ft == null)
+ continue;
try {
ft.get(); // FIXME Timeout to await followers in commit2Phase().
// } catch (TimeoutException ex) {
// // Timeout on this Future.
// log.error(ex, ex);
-// causes.add(ex);
-// done = false;
+// causes.set(i, ex);
} catch (CancellationException ex) {
// Future was cancelled.
log.error(ex, ex);
- causes.add(ex);
+ causes.set(i, ex);
} catch (ExecutionException ex) {
log.error(ex, ex);
- causes.add(ex);
+ causes.set(i, ex);
} catch (RuntimeException ex) {
/*
- * Note: ClientFuture.get() can throw a RuntimeException
- * if there is a problem with the RMI call. In this case
- * we do not know whether the Future is done.
+ * Note: ClientFuture.get() can throw a RuntimeException if
+ * there is a problem with the RMI call. In this case we do
+ * not know whether the Future is done.
*/
log.error(ex, ex);
- causes.add(ex);
+ causes.set(i, ex);
} finally {
// Note: cancelling a *local* Future wrapping an RMI.
ft.cancel(true/* mayInterruptIfRunning */);
@@ -511,13 +528,7 @@
/*
* If there were any errors, then throw an exception listing them.
*/
- if (!causes.isEmpty()) {
- // Throw exception back to the leader.
- if (causes.size() == 1)
- throw new RuntimeException(causes.get(0));
- throw new RuntimeException("remote errors: nfailures="
- + causes.size(), new ExecutionExceptions(causes));
- }
+ return new CommitResponse(commitRequest, causes);
} finally {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2013-11-01 13:34:10 UTC (rev 7506)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2013-11-01 21:10:45 UTC (rev 7507)
@@ -314,10 +314,10 @@
}
@Override
- public void commit2Phase(final CommitRequest req) throws IOException,
+ public CommitResponse commit2Phase(final CommitRequest req) throws IOException,
InterruptedException {
- commitImpl.commit2Phase(req);
+ return commitImpl.commit2Phase(req);
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-11-01 13:34:10 UTC (rev 7506)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-11-01 21:10:45 UTC (rev 7507)
@@ -96,6 +96,7 @@
import com.bigdata.counters.CounterSet;
import com.bigdata.counters.Instrument;
import com.bigdata.ha.CommitRequest;
+import com.bigdata.ha.CommitResponse;
import com.bigdata.ha.HAGlue;
import com.bigdata.ha.HAStatusEnum;
import com.bigdata.ha.HATXSGlue;
@@ -3527,6 +3528,26 @@
/**
* HA mode commit (2-phase commit).
+ */
+ private void commitHA() {
+
+ try {
+
+ prepare2Phase();
+
+ commit2Phase();
+
+ } catch (Exception e) {
+
+ // launder throwable.
+ throw new RuntimeException(e);
+
+ }
+
+ }
+
+ /**
+ * PREPARE
* <p>
* Note: We need to make an atomic decision here regarding whether a
* service is joined with the met quorum or not. This information will
@@ -3541,12 +3562,37 @@
* metadata for the znode that is the parent of the joined services.
* However, we would need an expanded interface to get that metadata
* from zookeeper out of the Quorum.
+ *
+ * @throws IOException
+ * @throws TimeoutException
+ * @throws InterruptedException
*/
- private void commitHA() {
-
+ private void prepare2Phase() throws InterruptedException,
+ TimeoutException, IOException {
+
+ boolean didPrepare = false;
try {
- if(!prepare2Phase()) {
+ // Atomic decision point for joined vs non-joined services.
+ prepareJoinedAndNonJoinedServices = new JoinedAndNonJoinedServices(
+ quorum);
+
+ prepareRequest = new PrepareRequest(//
+ consensusReleaseTime,//
+ gatherJoinedAndNonJoinedServices,//
+ prepareJoinedAndNonJoinedServices,//
+ newRootBlock,//
+ quorumService.getPrepareTimeout(), // timeout
+ TimeUnit.MILLISECONDS//
+ );
+
+ // issue prepare request.
+ prepareResponse = quorumService.prepare2Phase(prepareRequest);
+
+ if (haLog.isInfoEnabled())
+ haLog.info(prepareResponse.toString());
+
+ if (!prepareResponse.willCommit()) {
// PREPARE rejected.
throw new QuorumException("PREPARE rejected: nyes="
@@ -3554,15 +3600,12 @@
+ prepareResponse.replicationFactor());
}
+ didPrepare = true;
- commitRequest = new CommitRequest(prepareRequest,
- prepareResponse);
+ } finally {
- quorumService.commit2Phase(commitRequest);
+ if (!didPrepare) {
- } catch (Throwable e) {
-
- try {
/*
* Something went wrong. Any services that were in the
* pipeline could have a dirty write set. Services that
@@ -3581,13 +3624,74 @@
* breaks, then the services will move into the Error state
* and will do a local abort as part of that transition.
*/
- quorumService.abort2Phase(commitToken);
- } catch (Throwable t) {
- log.warn(t, t);
+
+ try {
+ quorumService.abort2Phase(commitToken);
+ } catch (Throwable t) {
+ log.warn(t, t);
+ }
+
}
- if (commitRequest != null) {
+ }
+
+ }
+ // Fields set by the method above.
+ private IJoinedAndNonJoinedServices prepareJoinedAndNonJoinedServices;
+ private PrepareRequest prepareRequest;
+ private PrepareResponse prepareResponse;
+
+ /**
+ * COMMIT.
+ *
+ * Pre-condition: PREPARE was successful on a majority of the services.
+ */
+ private void commit2Phase() throws Exception {
+
+ boolean didCommit = false;
+ try {
+
+ /*
+ * Prepare was successful. COMMIT message has been formed. We
+ * will now commit.
+ *
+ * Note: The overall commit will fail unless we can prove that a
+ * majority of the services successfully committed.
+ */
+
+ commitRequest = new CommitRequest(prepareRequest,
+ prepareResponse);
+
+ commitResponse = quorumService.commit2Phase(commitRequest);
+
+ if (!store.quorum.isQuorum(commitResponse.getNOk())) {
+
/*
+ * Fail the commit.
+ *
+ * Note: An insufficient number of services were able to
+ * COMMIT successfully.
+ *
+ * Note: It is possible that a commit could be failed here
+ * when the commit is in fact stable on a majority of
+ * services. For example, with k=3 and 2 services running if
+ * both of them correctly update their root blocks but we
+ * lose network connectivity to the follower before the RMI
+ * returns, then we will fail the commit.
+ */
+
+ // Note: Guaranteed to not return normally!
+ commitResponse.throwCauses();
+
+ }
+
+ didCommit = true;
+
+ } finally {
+
+ if (!didCommit) {
+
+ /*
* The quorum voted to commit, but something went wrong.
*
* This forces the leader to fail over. The quorum can then
@@ -3614,55 +3718,18 @@
* another such that it will apply those HALog files on
* restart and form a consensus with the other services.
*/
+
quorumService.enterErrorState();
+
}
-
- // Re-throw the root cause exception.
- throw new RuntimeException(e);
}
}
// Fields set by the method above.
private CommitRequest commitRequest;
+ private CommitResponse commitResponse;
- /**
- * Return <code>true</code> iff the 2-phase PREPARE votes to COMMIT.
- *
- * @throws IOException
- * @throws TimeoutException
- * @throws InterruptedException
- */
- private boolean prepare2Phase() throws InterruptedException,
- TimeoutException, IOException {
-
- // Atomic decision point for joined vs non-joined services.
- prepareJoinedAndNonJoinedServices = new JoinedAndNonJoinedServices(
- quorum);
-
- prepareRequest = new PrepareRequest(//
- consensusReleaseTime,//
- gatherJoinedAndNonJoinedServices,//
- prepareJoinedAndNonJoinedServices,//
- newRootBlock,//
- quorumService.getPrepareTimeout(), // timeout
- TimeUnit.MILLISECONDS//
- );
-
- // issue prepare request.
- prepareResponse = quorumService.prepare2Phase(prepareRequest);
-
- if (haLog.isInfoEnabled())
- haLog.info(prepareResponse.toString());
-
- return prepareResponse.willCommit();
-
- }
- // Fields set by the method above.
- private IJoinedAndNonJoinedServices prepareJoinedAndNonJoinedServices;
- private PrepareRequest prepareRequest;
- private PrepareResponse prepareResponse;
-
} // class CommitState.
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-11-01 13:34:10 UTC (rev 7506)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-11-01 21:10:45 UTC (rev 7507)
@@ -494,7 +494,7 @@
*
* TODO Consider leader failure scenarios in this test suite, not just
* scenarios where B fails. We MUST also cover failures of C (the 2nd
- * follower). We should also cover scenariors where the quorum is barely met
+ * follower). We should also cover scenarios where the quorum is barely met
* and a single failure causes a rejected commit (local decision) or 2-phase
* abort (joined services in joint agreement).
*
@@ -515,7 +515,17 @@
awaitCommitCounter(1L, startup.serverA, startup.serverB,
startup.serverC);
- // Setup B to fail the "COMMIT" message.
+ /*
+ * Set up B to fail the "COMMIT" message (specifically, it will throw
+ * back an exception rather than executing the commit).
+ *
+ * FIXME We need to cause B to actually fail the commit such that it
+ * enters the ERROR state. This is only causing the RMI to be rejected
+ * so B is not being failed out of the pipeline. Thus, B will remain
+ * joined with the met quorum (but at the wrong commit point) until we
+ * send down another replicated write. At that point B will notice that
+ * it is out of whack and enter the ERROR state.
+ */
((HAGlueTest) startup.serverB)
.failNext("commit2Phase",
new Class[] { IHA2PhaseCommitMessage.class },
@@ -543,13 +553,24 @@
// Should be two commit points on {A,C].
awaitCommitCounter(2L, startup.serverA, startup.serverC);
+ // Just one commit point on B.
+ awaitCommitCounter(1L, startup.serverB);
+
+ // B is still a follower.
+ awaitHAStatus(startup.serverB, HAStatusEnum.Follower);
+
/*
* B should go into an ERROR state and then into SeekConsensus and
* from there to RESYNC and finally back to RunMet. We can not
* reliably observe the intervening states. So what we really need
* to do is watch for B to move to the end of the pipeline and catch
* up to the same commit point.
+ *
+ * FIXME This is forcing B into an error state to simulate what
+ * would happen if B had encountered an error during the 2-phase
+ * commit above.
*/
+ ((HAGlueTest)startup.serverB).enterErrorState();
/*
* The pipeline should be reordered. B will do a service leave, then
@@ -558,6 +579,8 @@
awaitPipeline(new HAGlue[] { startup.serverA, startup.serverC,
startup.serverB });
+ awaitFullyMetQuorum();
+
/*
* There should be two commit points on {A,C,B} (note that this
* assert does not pay attention to the pipeline order).
|