From: <mrp...@us...> - 2010-10-05 02:33:17
Revision: 3729 http://bigdata.svn.sourceforge.net/bigdata/?rev=3729&view=rev Author: mrpersonick Date: 2010-10-05 02:33:11 +0000 (Tue, 05 Oct 2010) Log Message: ----------- incremental progress on change sets: simple add/remove Modified Paths: -------------- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ChangeRecord.java branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/IChangeRecord.java branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java Added Paths: ----------- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexMutation.java Modified: branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java 2010-10-04 23:12:46 UTC (rev 3728) +++ branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java 2010-10-05 02:33:11 UTC (rev 3729) @@ -48,6 +48,9 @@ import com.bigdata.rdf.model.BigdataValue; import com.bigdata.rdf.model.BigdataValueFactory; import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.sail.changesets.ChangeRecord; +import com.bigdata.rdf.sail.changesets.IChangeLog; +import com.bigdata.rdf.sail.changesets.IChangeRecord.ChangeAction; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.store.AbstractTripleStore; @@ -252,6 +255,16 @@ private boolean readOnly = false; + public void setChangeLog(final IChangeLog changeLog) { + + this.changeLog = changeLog; + + } + + protected IChangeLog changeLog; + + + /** * Create a buffer that converts Sesame {@link Value} objects to {@link SPO}s * and writes on the <i>database</i> when it is {@link #flush()}ed. This @@ -297,7 +310,7 @@ */ public StatementBuffer(final TempTripleStore statementStore, final AbstractTripleStore database, final int capacity) { - + if (database == null) throw new IllegalArgumentException(); @@ -362,7 +375,7 @@ * @todo this implementation always returns ZERO (0). 
*/ public long flush() { - + log.info(""); /* @@ -874,6 +887,13 @@ if (tmp[i].isModified()) { stmts[i].setModified(true); + + if (changeLog != null) { + + changeLog.changeEvent( + new ChangeRecord(stmts[i], ChangeAction.ADDED)); + + } } Added: branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexMutation.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexMutation.java (rev 0) +++ branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexMutation.java 2010-10-05 02:33:11 UTC (rev 3729) @@ -0,0 +1,11 @@ +package com.bigdata.rdf.spo; + +public enum SPOIndexMutation { + + ADDED, + + REMOVED, + + TYPE_CHANGE + +} Modified: branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2010-10-04 23:12:46 UTC (rev 3728) +++ branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2010-10-05 02:33:11 UTC (rev 3729) @@ -129,8 +129,10 @@ import com.bigdata.rdf.rio.StatementBuffer; import com.bigdata.rdf.rules.BackchainAccessPath; import com.bigdata.rdf.rules.InferenceEngine; +import com.bigdata.rdf.sail.changesets.ChangeRecord; import com.bigdata.rdf.sail.changesets.IChangeLog; import com.bigdata.rdf.sail.changesets.IChangeRecord; +import com.bigdata.rdf.sail.changesets.IChangeRecord.ChangeAction; import com.bigdata.rdf.spo.ExplicitSPOFilter; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.InferredSPOFilter; @@ -1447,6 +1449,8 @@ // FIXME bnodes : must also track the reverse mapping [bnodes2]. assertBuffer.setBNodeMap(bnodes); + + assertBuffer.setChangeLog(changeLog); } @@ -2278,7 +2282,7 @@ } // #of explicit statements removed. - final long n; + long n = 0; if (getTruthMaintenance()) { @@ -2319,7 +2323,42 @@ * buffered). */ - n = database.removeStatements(s, p, o, c); + if (changeLog == null) { + + n = database.removeStatements(s, p, o, c); + + } else { + + final IAccessPath<ISPO> ap = + database.getAccessPath(s, p, o, c); + + final IChunkedOrderedIterator<ISPO> itr = ap.iterator(); + + if (itr.hasNext()) { + + final BigdataStatementIteratorImpl itr2 = + new BigdataStatementIteratorImpl(database, bnodes2, itr) + .start(database.getExecutorService()); + + final BigdataStatement[] stmts = + new BigdataStatement[database.getChunkCapacity()]; + + int i = 0; + while (i < stmts.length && itr2.hasNext()) { + stmts[i++] = itr2.next(); + if (i == stmts.length) { + // process stmts[] + n += removeAndNotify(stmts, i); + i = 0; + } + } + if (i > 0) { + n += removeAndNotify(stmts, i); + } + + } + + } } @@ -2327,7 +2366,70 @@ return (int) Math.min(Integer.MAX_VALUE, n); } + + private long removeAndNotify(final BigdataStatement[] stmts, final int numStmts) { + + final SPO[] tmp = new SPO[numStmts]; + for (int i = 0; i < tmp.length; i++) { + + final BigdataStatement stmt = stmts[i]; + + /* + * Note: context position is not passed when statement identifiers + * are in use since the statement identifier is assigned based on + * the {s,p,o} triple. + */ + + final SPO spo = new SPO(stmt); + + if (log.isDebugEnabled()) + log.debug("adding: " + stmt.toString() + " (" + spo + ")"); + + if(!spo.isFullyBound()) { + + throw new AssertionError("Not fully bound? 
: " + spo); + + } + + tmp[i] = spo; + + } + + /* + * Note: When handling statement identifiers, we clone tmp[] to avoid a + * side-effect on its order so that we can unify the assigned statement + * identifiers below. + * + * Note: In order to report back the [ISPO#isModified()] flag, we also + * need to clone tmp[] to avoid a side effect on its order. Therefore we + * now always clone tmp[]. + */ +// final long nwritten = writeSPOs(sids ? tmp.clone() : tmp, numStmts); + final long nwritten = database.removeStatements(tmp.clone(), numStmts); + + // Copy the state of the isModified() flag + { + + for (int i = 0; i < numStmts; i++) { + + if (tmp[i].isModified()) { + + stmts[i].setModified(true); + + changeLog.changeEvent( + new ChangeRecord(stmts[i], ChangeAction.REMOVED)); + + } + + } + + } + + return nwritten; + + } + public synchronized CloseableIteration<? extends Resource, SailException> getContextIDs() throws SailException { @@ -2420,6 +2522,12 @@ // discard the write set. database.abort(); + if (changeLog != null) { + + changeLog.transactionAborted(); + + } + } /** @@ -2444,6 +2552,12 @@ database.commit(); + if (changeLog != null) { + + changeLog.transactionCommited(); + + } + } // /** @@ -3327,8 +3441,18 @@ * @param log * the change log */ - public void setChangeLog(final IChangeLog log) { + public void setChangeLog(final IChangeLog changeLog) { + + this.changeLog = changeLog; + + if (assertBuffer != null) { + + assertBuffer.setChangeLog(changeLog); + + } } + + private IChangeLog changeLog; } Modified: branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ChangeRecord.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ChangeRecord.java 2010-10-04 23:12:46 UTC (rev 3728) +++ branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ChangeRecord.java 2010-10-05 02:33:11 UTC (rev 3729) @@ -1,7 +1,11 @@ package com.bigdata.rdf.sail.changesets; +import java.util.Comparator; import com.bigdata.rdf.model.BigdataStatement; import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPO; +import com.bigdata.rdf.spo.SPOComparator; public class ChangeRecord implements IChangeRecord { @@ -44,4 +48,55 @@ return stmt; } + + @Override + public boolean equals(Object o) { + + if (o == this) + return true; + + if (o == null || o instanceof IChangeRecord == false) + return false; + + final IChangeRecord rec = (IChangeRecord) o; + + final BigdataStatement stmt2 = rec.getStatement(); + + // statements are equal + if (stmt == stmt2 || + (stmt != null && stmt2 != null && stmt.equals(stmt2))) { + + // actions are equal + return action == rec.getChangeAction(); + + } + + return false; + + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append(action).append(": ").append(stmt); + + return sb.toString(); + + } + + public static final Comparator<IChangeRecord> COMPARATOR = + new Comparator<IChangeRecord>() { + + public int compare(final IChangeRecord r1, final IChangeRecord r2) { + + final ISPO spo1 = new SPO(r1.getStatement()); + final ISPO spo2 = new SPO(r2.getStatement()); + + return SPOComparator.INSTANCE.compare(spo1, spo2); + + } + + }; + } Modified: branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/IChangeRecord.java =================================================================== --- 
branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/IChangeRecord.java 2010-10-04 23:12:46 UTC (rev 3728) +++ branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/IChangeRecord.java 2010-10-05 02:33:11 UTC (rev 3729) @@ -106,14 +106,14 @@ */ ChangeAction getChangeAction(); - /** - * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method - * will return the old statement type of the focus statement. The - * new statement type is available on the focus statement itself. - * - * @return - * the old statement type of the focus statement - */ - StatementEnum getOldStatementType(); +// /** +// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method +// * will return the old statement type of the focus statement. The +// * new statement type is available on the focus statement itself. +// * +// * @return +// * the old statement type of the focus statement +// */ +// StatementEnum getOldStatementType(); } Modified: branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java 2010-10-04 23:12:46 UTC (rev 3728) +++ branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java 2010-10-05 02:33:11 UTC (rev 3729) @@ -274,8 +274,8 @@ return bindingSet; } - protected void compare(final TupleQueryResult result, - final Collection<BindingSet> answer) + protected void compare(final TupleQueryResult actual, + final Collection<BindingSet> expected) throws QueryEvaluationException { try { @@ -285,13 +285,13 @@ int resultCount = 0; int nmatched = 0; - while (result.hasNext()) { - BindingSet bindingSet = result.next(); + while (actual.hasNext()) { + BindingSet bindingSet = actual.next(); resultCount++; boolean match = false; if(log.isInfoEnabled()) log.info(bindingSet); - Iterator<BindingSet> it = answer.iterator(); + Iterator<BindingSet> it = expected.iterator(); while (it.hasNext()) { if (it.next().equals(bindingSet)) { it.remove(); @@ -304,7 +304,7 @@ extraResults.add(bindingSet); } } - missingResults = answer; + missingResults = expected; for (BindingSet bs : extraResults) { if (log.isInfoEnabled()) { @@ -326,7 +326,7 @@ } finally { - result.close(); + actual.close(); } Modified: branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java 2010-10-04 23:12:46 UTC (rev 3728) +++ branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java 2010-10-05 02:33:11 UTC (rev 3729) @@ -26,14 +26,21 @@ package com.bigdata.rdf.sail; +import java.util.Arrays; import java.util.Collection; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedList; +import java.util.List; import java.util.Map; +import java.util.Properties; import org.apache.log4j.Logger; import org.openrdf.model.URI; import org.openrdf.model.vocabulary.RDF; import org.openrdf.model.vocabulary.RDFS; +import org.openrdf.query.BindingSet; +import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.axioms.OwlAxioms; import com.bigdata.rdf.model.BigdataStatement; import com.bigdata.rdf.model.BigdataValueFactory; import com.bigdata.rdf.sail.changesets.ChangeRecord; @@ -41,6 +48,8 
@@ import com.bigdata.rdf.sail.changesets.IChangeRecord; import com.bigdata.rdf.sail.changesets.IChangeRecord.ChangeAction; import com.bigdata.rdf.store.BD; +import com.bigdata.rdf.vocab.NoVocabulary; +import com.bigdata.rdf.vocab.RDFSVocabulary; /** * @author <a href="mailto:mrp...@us...">Mike Personick</a> @@ -50,6 +59,44 @@ protected static final Logger log = Logger.getLogger(TestChangeSets.class); + public Properties getTriplesNoInference() { + + Properties props = super.getProperties(); + + // triples with sids + props.setProperty(BigdataSail.Options.QUADS, "false"); + props.setProperty(BigdataSail.Options.STATEMENT_IDENTIFIERS, "true"); + + // no inference + props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false"); + props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName()); + props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName()); + props.setProperty(BigdataSail.Options.JUSTIFY, "false"); + props.setProperty(BigdataSail.Options.TEXT_INDEX, "false"); + + return props; + + } + + public Properties getTriplesWithInference() { + + Properties props = super.getProperties(); + + // triples with sids + props.setProperty(BigdataSail.Options.QUADS, "false"); + props.setProperty(BigdataSail.Options.STATEMENT_IDENTIFIERS, "true"); + + // no inference + props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "true"); + props.setProperty(BigdataSail.Options.AXIOMS_CLASS, OwlAxioms.class.getName()); + props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, RDFSVocabulary.class.getName()); + props.setProperty(BigdataSail.Options.JUSTIFY, "true"); + props.setProperty(BigdataSail.Options.TEXT_INDEX, "false"); + + return props; + + } + /** * */ @@ -63,9 +110,9 @@ super(arg0); } - public void testChangeSets() throws Exception { + public void testSimpleAdd() throws Exception { - final BigdataSail sail = getSail(); + final BigdataSail sail = getSail(getTriplesNoInference()); sail.initialize(); final BigdataSailRepository repo = new BigdataSailRepository(sail); final BigdataSailRepositoryConnection cxn = @@ -85,6 +132,188 @@ final URI b = vf.createURI(ns+"B"); final URI c = vf.createURI(ns+"C"); + final BigdataStatement[] stmts = new BigdataStatement[] { + vf.createStatement(a, RDFS.SUBCLASSOF, b), + vf.createStatement(b, RDFS.SUBCLASSOF, c), + }; + + final BigdataStatement[] stmts2 = new BigdataStatement[] { + vf.createStatement(a, RDFS.SUBCLASSOF, c), + }; + +/**/ + cxn.setNamespace("ns", ns); + + // add the stmts[] + + for (BigdataStatement stmt : stmts) { + cxn.add(stmt); + } + + cxn.commit();// + + { // should see all of the stmts[] added + + final Collection<IChangeRecord> expected = + new LinkedList<IChangeRecord>(); + for (BigdataStatement stmt : stmts) { + expected.add(new ChangeRecord(stmt, ChangeAction.ADDED)); + } + + compare(expected, changeLog.getChangeSet()); + + } + + // add the stmts[] again + + for (BigdataStatement stmt : stmts) { + cxn.add(stmt); + } + + cxn.commit();// + + { // shouldn't see any change records + + compare(new LinkedList<IChangeRecord>(), changeLog.getChangeSet()); + + } + + // add the stmts2[] + + for (BigdataStatement stmt : stmts2) { + cxn.add(stmt); + } + + cxn.commit();// + + { // should see all of the stmts2[] added + + final Collection<IChangeRecord> expected = + new LinkedList<IChangeRecord>(); + for (BigdataStatement stmt : stmts2) { + expected.add(new ChangeRecord(stmt, ChangeAction.ADDED)); + } + + compare(expected, changeLog.getChangeSet()); + + } + + if (log.isDebugEnabled()) { + log.debug("\n" + 
sail.getDatabase().dumpStore(true, true, false)); + } + + } finally { + cxn.close(); + sail.__tearDownUnitTest(); + } + + } + + public void testSimpleRemove() throws Exception { + + final BigdataSail sail = getSail(getTriplesNoInference()); + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = + (BigdataSailRepositoryConnection) repo.getConnection(); + cxn.setAutoCommit(false); + + final TestChangeLog changeLog = new TestChangeLog(); + cxn.setChangeLog(changeLog); + + try { + + final BigdataValueFactory vf = (BigdataValueFactory) sail.getValueFactory(); + + final String ns = BD.NAMESPACE; + + final URI a = vf.createURI(ns+"A"); + final URI b = vf.createURI(ns+"B"); + final URI c = vf.createURI(ns+"C"); + + final BigdataStatement[] stmts = new BigdataStatement[] { + vf.createStatement(a, RDFS.SUBCLASSOF, b), + vf.createStatement(b, RDFS.SUBCLASSOF, c), + }; + +/**/ + cxn.setNamespace("ns", ns); + + // add the stmts[] + + for (BigdataStatement stmt : stmts) { + cxn.add(stmt); + } + + cxn.commit();// + + // remove the stmts[] + + for (BigdataStatement stmt : stmts) { + cxn.remove(stmt); + } + + cxn.commit();// + + if (log.isDebugEnabled()) { + log.debug("\ndump store:\n" + sail.getDatabase().dumpStore(true, true, false)); + } + + { // should see all of the stmts[] removed + + final Collection<IChangeRecord> expected = + new LinkedList<IChangeRecord>(); + for (BigdataStatement stmt : stmts) { + expected.add(new ChangeRecord(stmt, ChangeAction.REMOVED)); + } + + compare(expected, changeLog.getChangeSet()); + + } + + // remove the stmts[] again + + for (BigdataStatement stmt : stmts) { + cxn.remove(stmt); + } + + cxn.commit();// + + { // shouldn't see any change records + + compare(new LinkedList<IChangeRecord>(), changeLog.getChangeSet()); + + } + + } finally { + cxn.close(); + sail.__tearDownUnitTest(); + } + + } + + public void testTruthMaintenance() throws Exception { + + final BigdataSail sail = getSail(getTriplesWithInference()); + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = + (BigdataSailRepositoryConnection) repo.getConnection(); + cxn.setAutoCommit(false); + + final TestChangeLog changeLog = new TestChangeLog(); + cxn.setChangeLog(changeLog); + + try { + + final BigdataValueFactory vf = (BigdataValueFactory) sail.getValueFactory(); + + final String ns = BD.NAMESPACE; + + final URI a = vf.createURI(ns+"A"); + final URI b = vf.createURI(ns+"B"); + final URI c = vf.createURI(ns+"C"); + final BigdataStatement[] explicit = new BigdataStatement[] { vf.createStatement(a, RDFS.SUBCLASSOF, b), vf.createStatement(b, RDFS.SUBCLASSOF, c), @@ -134,11 +363,52 @@ } - private void compare(final Collection<IChangeRecord> expected, + private void compare(final Collection<IChangeRecord> expected, final Collection<IChangeRecord> actual) { - fail(); + final Collection<IChangeRecord> extra = new LinkedList<IChangeRecord>(); + Collection<IChangeRecord> missing = new LinkedList<IChangeRecord>(); + + int resultCount = 0; + int nmatched = 0; + for (IChangeRecord rec : actual) { + resultCount++; + boolean match = false; + if(log.isInfoEnabled()) + log.info(rec); + Iterator<IChangeRecord> it = expected.iterator(); + while (it.hasNext()) { + if (it.next().equals(rec)) { + it.remove(); + match = true; + nmatched++; + break; + } + } + if (match == false) { + extra.add(rec); + } + } + missing = expected; + + for (IChangeRecord rec : extra) { + if 
(log.isInfoEnabled()) { + log.info("extra result: " + rec); + } + } + for (IChangeRecord rec : missing) { + if (log.isInfoEnabled()) { + log.info("missing result: " + rec); + } + } + + if (!extra.isEmpty() || !missing.isEmpty()) { + fail("matchedResults=" + nmatched + ", extraResults=" + + extra.size() + ", missingResults=" + + missing.size()); + } + } /** @@ -165,12 +435,16 @@ public synchronized void changeEvent(final IChangeRecord record) { + System.err.println(record); + uncommitted.put(record.getStatement(), record); } public synchronized void transactionCommited() { + System.err.println("transaction committed"); + committed.clear(); committed.putAll(uncommitted); @@ -181,6 +455,8 @@ public synchronized void transactionAborted() { + System.err.println("transaction aborted"); + uncommitted.clear(); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
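The listener contract introduced by r3729 is small: a client hands the connection an IChangeLog via setChangeLog(), receives one changeEvent() per statement whose isModified() flag was set by the write, and is told when the transaction resolves. A minimal sketch of such a listener follows, modeled on the TestChangeLog inner class in TestChangeSets above. The method names changeEvent, transactionCommited, and transactionAborted and the types IChangeLog, IChangeRecord, and BigdataStatement are taken from the diff (including the interface's spelling of "Commited"); the Map-based buffering mirrors the test class, and the rest is illustrative only.

{{{
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;

import com.bigdata.rdf.model.BigdataStatement;
import com.bigdata.rdf.sail.changesets.IChangeLog;
import com.bigdata.rdf.sail.changesets.IChangeRecord;

/**
 * Buffers change records until the transaction resolves: a commit
 * promotes the uncommitted records to the "last committed" change set,
 * while an abort simply discards them.
 */
public class BufferingChangeLog implements IChangeLog {

    /** Records seen since the last commit/abort, keyed on statement. */
    private final Map<BigdataStatement, IChangeRecord> uncommitted =
            new HashMap<BigdataStatement, IChangeRecord>();

    /** The change set of the most recently committed transaction. */
    private final Map<BigdataStatement, IChangeRecord> committed =
            new HashMap<BigdataStatement, IChangeRecord>();

    public synchronized void changeEvent(final IChangeRecord record) {
        uncommitted.put(record.getStatement(), record);
    }

    // Spelling "Commited" follows the IChangeLog interface in this branch.
    public synchronized void transactionCommited() {
        committed.clear();
        committed.putAll(uncommitted);
        uncommitted.clear();
    }

    public synchronized void transactionAborted() {
        uncommitted.clear();
    }

    /** Snapshot of the last committed change set. */
    public synchronized Collection<IChangeRecord> getChangeSet() {
        return new LinkedList<IChangeRecord>(committed.values());
    }
}
}}}

Wiring follows the test code above: call cxn.setChangeLog(new BufferingChangeLog()) on the BigdataSailRepositoryConnection before writing, then inspect getChangeSet() after each cxn.commit().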
From: <mrp...@us...> - 2010-10-06 02:34:21
Revision: 3736 http://bigdata.svn.sourceforge.net/bigdata/?rev=3736&view=rev Author: mrpersonick Date: 2010-10-06 02:34:14 +0000 (Wed, 06 Oct 2010) Log Message: ----------- change sets notification for truth maintenance add and remove Modified Paths: -------------- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/StatementWriter.java branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java Added Paths: ----------- branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/StatementWriter.java Modified: branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java 2010-10-05 20:30:37 UTC (rev 3735) +++ branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java 2010-10-06 02:34:14 UTC (rev 3736) @@ -29,11 +29,15 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicLong; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; +import com.bigdata.rdf.sail.changesets.IChangeLog; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.ISPOAssertionBuffer; import com.bigdata.rdf.spo.JustificationWriter; @@ -101,6 +105,10 @@ * {@link Justification}s for entailments. */ protected final boolean justify; + + protected final IChangeLog changeLog; + + protected final Map<IV, BigdataBNode> bnodes; /** * Create a buffer. @@ -126,6 +134,17 @@ AbstractTripleStore db, IElementFilter<ISPO> filter, int capacity, boolean justified) { + this(focusStore, db, filter, capacity, justified, + null/* changeLog */, null/* bnodes */); + + } + + public SPOAssertionBuffer(AbstractTripleStore focusStore, + AbstractTripleStore db, IElementFilter<ISPO> filter, int capacity, + boolean justified, + final IChangeLog changeLog, final Map<IV, BigdataBNode> bnodes + ) { + super(db, filter, capacity); if (focusStore == null) @@ -142,6 +161,10 @@ justifications = justified ? new Justification[capacity] : null; + this.changeLog = changeLog; + + this.bnodes = bnodes; + } /** @@ -180,12 +203,28 @@ if (numJustifications == 0) { - // batch insert statements into the focusStore. - n = db.addStatements( + if (changeLog == null) { + + // batch insert statements into the focusStore. + n = db.addStatements( focusStore, true/* copyOnly */, new ChunkedArrayIterator<ISPO>(numStmts, stmts, null/*keyOrder*/), null/*filter*/); + + } else { + + n = com.bigdata.rdf.sail.changesets. + StatementWriter.addStatements( + db, + focusStore, + true/* copyOnly */, + null/* filter */, + new ChunkedArrayIterator<ISPO>(numStmts, stmts, null/*keyOrder*/), + changeLog, + bnodes); + + } } else { @@ -209,7 +248,8 @@ // task will write SPOs on the statement indices. 
tasks.add(new StatementWriter(getTermDatabase(), focusStore, false/* copyOnly */, new ChunkedArrayIterator<ISPO>( - numStmts, stmts, null/*keyOrder*/), nwritten)); + numStmts, stmts, null/*keyOrder*/), nwritten, + changeLog, bnodes)); // task will write justifications on the justifications index. final AtomicLong nwrittenj = new AtomicLong(); Modified: branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java 2010-10-05 20:30:37 UTC (rev 3735) +++ branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java 2010-10-06 02:34:14 UTC (rev 3736) @@ -27,6 +27,11 @@ package com.bigdata.rdf.inf; +import java.util.Map; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; +import com.bigdata.rdf.sail.changesets.IChangeLog; +import com.bigdata.rdf.sail.changesets.StatementWriter; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.store.AbstractTripleStore; @@ -50,6 +55,10 @@ private final AbstractTripleStore store; private final boolean computeClosureForStatementIdentifiers; + protected final IChangeLog changeLog; + + protected final Map<IV, BigdataBNode> bnodes; + /** * @param store * The database from which the statement will be removed when the @@ -63,6 +72,15 @@ public SPORetractionBuffer(AbstractTripleStore store, int capacity, boolean computeClosureForStatementIdentifiers) { + this(store, capacity, computeClosureForStatementIdentifiers, + null/* changeLog */, null/* bnodes */); + + } + + public SPORetractionBuffer(AbstractTripleStore store, int capacity, + boolean computeClosureForStatementIdentifiers, + final IChangeLog changeLog, final Map<IV, BigdataBNode> bnodes) { + super(store, null/*filter*/, capacity); if (store == null) @@ -72,14 +90,34 @@ this.computeClosureForStatementIdentifiers = computeClosureForStatementIdentifiers; + this.changeLog = changeLog; + + this.bnodes = bnodes; + } public int flush() { if (isEmpty()) return 0; - long n = store.removeStatements(new ChunkedArrayIterator<ISPO>(numStmts,stmts, + final long n; + + if (changeLog == null) { + + n = store.removeStatements(new ChunkedArrayIterator<ISPO>(numStmts,stmts, null/*keyOrder*/), computeClosureForStatementIdentifiers); + + } else { + + n = StatementWriter.removeStatements( + store, + new ChunkedArrayIterator<ISPO>( + numStmts,stmts,null/*keyOrder*/), + computeClosureForStatementIdentifiers, + changeLog, + bnodes); + + } // reset the counter. 
numStmts = 0; Modified: branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java 2010-10-05 20:30:37 UTC (rev 3735) +++ branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java 2010-10-06 02:34:14 UTC (rev 3736) @@ -47,21 +47,27 @@ package com.bigdata.rdf.inf; +import java.util.Map; import java.util.Properties; import org.apache.log4j.Logger; import org.apache.log4j.MDC; import com.bigdata.journal.TemporaryStore; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; +import com.bigdata.rdf.model.BigdataStatement; import com.bigdata.rdf.model.StatementEnum; import com.bigdata.rdf.rio.IStatementBuffer; import com.bigdata.rdf.rules.InferenceEngine; +import com.bigdata.rdf.sail.changesets.IChangeLog; import com.bigdata.rdf.spo.ExplicitSPOFilter; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.spo.SPOArrayIterator; import com.bigdata.rdf.spo.SPOKeyOrder; import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BigdataStatementIterator; import com.bigdata.rdf.store.IRawTripleStore; import com.bigdata.rdf.store.TempTripleStore; import com.bigdata.relation.accesspath.IElementFilter; @@ -234,8 +240,21 @@ static public int applyExistingStatements( final AbstractTripleStore focusStore, final AbstractTripleStore database, - final IElementFilter<ISPO> filter) { + final IElementFilter<ISPO> filter + ) { + + return applyExistingStatements(focusStore, database, filter, + null/* changeLog */, null/* bnodes */); + } + + static public int applyExistingStatements( + final AbstractTripleStore focusStore, + final AbstractTripleStore database, + final IElementFilter<ISPO> filter, + final IChangeLog changeLog, final Map<IV, BigdataBNode> bnodes + ) { + if(INFO) log.info("Filtering statements already known to the database"); @@ -248,7 +267,7 @@ final IChunkedOrderedIterator<ISPO> itr = focusStore.getAccessPath( SPOKeyOrder.SPO, ExplicitSPOFilter.INSTANCE).iterator(); - + int nremoved = 0; int nupgraded = 0; @@ -266,7 +285,8 @@ */ final SPOAssertionBuffer assertionBuffer = new SPOAssertionBuffer( - database, database, filter, capacity, false/* justified */); + database, database, filter, capacity, false/* justified */, + changeLog, bnodes); /* * This buffer will retract statements from the tempStore that are @@ -290,7 +310,7 @@ for(int i=0; i<chunk.length; i++) { final SPO spo = (SPO)chunk[i]; - + // Lookup the statement in the database. final ISPO tmp = database.getStatement(spo.s, spo.p, spo.o); @@ -365,6 +385,13 @@ */ public ClosureStats assertAll(final TempTripleStore tempStore) { + return assertAll(tempStore, null/* changeLog */, null/* bnodes */); + + } + + public ClosureStats assertAll(final TempTripleStore tempStore, + final IChangeLog changeLog, final Map<IV, BigdataBNode> bnodes) { + if (tempStore == null) { throw new IllegalArgumentException(); @@ -409,7 +436,7 @@ * consistent if we change our mind about that practice. 
*/ - applyExistingStatements(tempStore, database, inferenceEngine.doNotAddFilter); + applyExistingStatements(tempStore, database, inferenceEngine.doNotAddFilter, changeLog, bnodes); final ClosureStats stats = inferenceEngine.computeClosure(tempStore); @@ -429,7 +456,8 @@ // tempStore.dumpStore(database,true,true,false,true); final long ncopied = tempStore.copyStatements(database, - null/* filter */, true /* copyJustifications */); + null/* filter */, true /* copyJustifications */, + changeLog, bnodes); // database.dumpStore(database,true,true,false,true); @@ -478,6 +506,13 @@ */ public ClosureStats retractAll(final TempTripleStore tempStore) { + return retractAll(tempStore, null/* changeLog */, null/* bnodes */); + + } + + public ClosureStats retractAll(final TempTripleStore tempStore, + final IChangeLog changeLog, final Map<IV, BigdataBNode> bnodes) { + final long begin = System.currentTimeMillis(); final ClosureStats stats = new ClosureStats(); @@ -512,7 +547,7 @@ } // do truth maintenance. - retractAll(stats, tempStore, 0); + retractAll(stats, tempStore, 0, changeLog, bnodes); MDC.remove("depth"); @@ -591,7 +626,8 @@ * explicit statements to be retracted. */ private void retractAll(final ClosureStats stats, - final TempTripleStore tempStore, final int depth) { + final TempTripleStore tempStore, final int depth, + final IChangeLog changeLog, final Map<IV, BigdataBNode> bnodes) { MDC.put("depth", "depth=" + depth); @@ -640,7 +676,9 @@ database, // the persistent db. null, //filter @todo was inferenceEngine.doNotAddFilter, capacity,// - false // justify + false,// justify + changeLog, + bnodes ); /* @@ -657,7 +695,8 @@ * identifiers. */ final SPORetractionBuffer retractionBuffer = new SPORetractionBuffer( - database, capacity, false/* computeClosureForStatementIdentifiers */); + database, capacity, false/* computeClosureForStatementIdentifiers */, + changeLog, bnodes); /* * Note: when we enter this method recursively statements in the @@ -964,7 +1003,7 @@ * Recursive processing. 
*/ - retractAll(stats, focusStore, depth + 1); + retractAll(stats, focusStore, depth + 1, changeLog, bnodes); } Modified: branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/StatementWriter.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/StatementWriter.java 2010-10-05 20:30:37 UTC (rev 3735) +++ branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/StatementWriter.java 2010-10-06 02:34:14 UTC (rev 3736) @@ -1,9 +1,17 @@ package com.bigdata.rdf.spo; +import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicLong; - +import org.apache.log4j.Logger; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.sail.changesets.ChangeRecord; +import com.bigdata.rdf.sail.changesets.IChangeLog; +import com.bigdata.rdf.sail.changesets.IChangeRecord.ChangeAction; import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BigdataStatementIteratorImpl; import com.bigdata.relation.accesspath.IElementFilter; import com.bigdata.striterator.IChunkedOrderedIterator; @@ -18,6 +26,8 @@ */ public class StatementWriter implements Callable<Long>{ + protected static final Logger log = Logger.getLogger(StatementWriter.class); + private final AbstractTripleStore database; private final AbstractTripleStore statementStore; private final boolean copyOnly; @@ -27,6 +37,10 @@ * Incremented by the #of statements written on the statements indices. */ public final AtomicLong nwritten; + + private final IChangeLog changeLog; + + private final Map<IV, BigdataBNode> bnodes; /** * @param database @@ -51,7 +65,17 @@ public StatementWriter(AbstractTripleStore database, AbstractTripleStore statementStore, boolean copyOnly, IChunkedOrderedIterator<ISPO> itr, AtomicLong nwritten) { - + + this(database, statementStore, copyOnly, itr, nwritten, + null/* changeLog */, null/* bnodes */); + + } + + public StatementWriter(final AbstractTripleStore database, + final AbstractTripleStore statementStore, final boolean copyOnly, + final IChunkedOrderedIterator<ISPO> itr, final AtomicLong nwritten, + final IChangeLog changeLog, final Map<IV, BigdataBNode> bnodes) { + if (database == null) throw new IllegalArgumentException(); @@ -73,6 +97,10 @@ this.itr = itr; this.nwritten = nwritten; + + this.changeLog = changeLog; + + this.bnodes = bnodes; } @@ -85,11 +113,30 @@ final long begin = System.currentTimeMillis(); - nwritten.addAndGet(database.addStatements(statementStore, copyOnly, - itr, null/* filter */)); + final long n; + + if (changeLog == null) { + + n = database.addStatements(statementStore, copyOnly, + itr, null/* filter */); + + } else { + n = com.bigdata.rdf.sail.changesets.StatementWriter.addStatements( + database, + statementStore, + copyOnly, + null/* filter */, + itr, + changeLog, + bnodes); + + } + + nwritten.addAndGet(n); + return System.currentTimeMillis() - begin; } - + } Modified: branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2010-10-05 20:30:37 UTC (rev 3735) +++ branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2010-10-06 02:34:14 UTC (rev 3736) @@ -87,6 +87,7 @@ import 
com.bigdata.rdf.lexicon.ITermIndexCodes; import com.bigdata.rdf.lexicon.ITextIndexer; import com.bigdata.rdf.lexicon.LexiconRelation; +import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.model.BigdataResource; import com.bigdata.rdf.model.BigdataStatement; import com.bigdata.rdf.model.BigdataURI; @@ -102,6 +103,7 @@ import com.bigdata.rdf.rules.MatchRule; import com.bigdata.rdf.rules.RDFJoinNexusFactory; import com.bigdata.rdf.rules.RuleContextEnum; +import com.bigdata.rdf.sail.changesets.IChangeLog; import com.bigdata.rdf.spo.BulkCompleteConverter; import com.bigdata.rdf.spo.BulkFilterConverter; import com.bigdata.rdf.spo.ExplicitSPOFilter; @@ -2982,6 +2984,18 @@ final AbstractTripleStore dst,// final IElementFilter<ISPO> filter,// final boolean copyJustifications// + ) { + + return copyStatements(dst, filter, copyJustifications, + null/* changeLog */, null /* bnodes */); + + } + + public long copyStatements(// + final AbstractTripleStore dst,// + final IElementFilter<ISPO> filter,// + final boolean copyJustifications,// + final IChangeLog changeLog, final Map<IV, BigdataBNode> bnodes ) { if (dst == this) @@ -2995,9 +3009,25 @@ if (!copyJustifications) { - // add statements to the target store. - return dst - .addStatements(dst, true/* copyOnly */, itr, null/* filter */); + if (changeLog == null) { + + // add statements to the target store. + return dst + .addStatements(dst, true/* copyOnly */, itr, null/* filter */); + + } else { + + return com.bigdata.rdf.sail.changesets. + StatementWriter.addStatements( + dst, + dst, + true/* copyOnly */, + null/* filter */, + itr, + changeLog, + bnodes); + + } } else { @@ -3020,8 +3050,8 @@ final AtomicLong nwritten = new AtomicLong(); // task will write SPOs on the statement indices. - tasks.add(new StatementWriter(this, dst, true/* copyOnly */, - itr, nwritten)); + tasks.add(new StatementWriter(dst, dst, true/* copyOnly */, + itr, nwritten, changeLog, bnodes)); // task will write justifications on the justifications index. 
final AtomicLong nwrittenj = new AtomicLong(); Modified: branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2010-10-05 20:30:37 UTC (rev 3735) +++ branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2010-10-06 02:34:14 UTC (rev 3736) @@ -132,6 +132,7 @@ import com.bigdata.rdf.sail.changesets.ChangeRecord; import com.bigdata.rdf.sail.changesets.IChangeLog; import com.bigdata.rdf.sail.changesets.IChangeRecord; +import com.bigdata.rdf.sail.changesets.StatementWriter; import com.bigdata.rdf.sail.changesets.IChangeRecord.ChangeAction; import com.bigdata.rdf.spo.ExplicitSPOFilter; import com.bigdata.rdf.spo.ISPO; @@ -2329,35 +2330,42 @@ } else { - final IAccessPath<ISPO> ap = - database.getAccessPath(s, p, o, c); - - final IChunkedOrderedIterator<ISPO> itr = ap.iterator(); + final IChunkedOrderedIterator<ISPO> itr = + database.getAccessPath(s, p, o, c).iterator(); - if (itr.hasNext()) { - - final BigdataStatementIteratorImpl itr2 = - new BigdataStatementIteratorImpl(database, bnodes2, itr) - .start(database.getExecutorService()); - - final BigdataStatement[] stmts = - new BigdataStatement[database.getChunkCapacity()]; - - int i = 0; - while (i < stmts.length && itr2.hasNext()) { - stmts[i++] = itr2.next(); - if (i == stmts.length) { - // process stmts[] - n += removeAndNotify(stmts, i); - i = 0; - } - } - if (i > 0) { - n += removeAndNotify(stmts, i); - } - - } + n = StatementWriter.removeStatements(database, itr, + true/* computeClosureForStatementIdentifiers */, + changeLog, bnodes2); +// final IAccessPath<ISPO> ap = +// database.getAccessPath(s, p, o, c); +// +// final IChunkedOrderedIterator<ISPO> itr = ap.iterator(); +// +// if (itr.hasNext()) { +// +// final BigdataStatementIteratorImpl itr2 = +// new BigdataStatementIteratorImpl(database, bnodes2, itr) +// .start(database.getExecutorService()); +// +// final BigdataStatement[] stmts = +// new BigdataStatement[database.getChunkCapacity()]; +// +// int i = 0; +// while (i < stmts.length && itr2.hasNext()) { +// stmts[i++] = itr2.next(); +// if (i == stmts.length) { +// // process stmts[] +// n += removeAndNotify(stmts, i); +// i = 0; +// } +// } +// if (i > 0) { +// n += removeAndNotify(stmts, i); +// } +// +// } + } } @@ -2367,69 +2375,69 @@ } - private long removeAndNotify(final BigdataStatement[] stmts, final int numStmts) { - - final SPO[] tmp = new SPO[numStmts]; +// private long removeAndNotify(final BigdataStatement[] stmts, final int numStmts) { +// +// final SPO[] tmp = new SPO[numStmts]; +// +// for (int i = 0; i < tmp.length; i++) { +// +// final BigdataStatement stmt = stmts[i]; +// +// /* +// * Note: context position is not passed when statement identifiers +// * are in use since the statement identifier is assigned based on +// * the {s,p,o} triple. +// */ +// +// final SPO spo = new SPO(stmt); +// +// if (log.isDebugEnabled()) +// log.debug("adding: " + stmt.toString() + " (" + spo + ")"); +// +// if(!spo.isFullyBound()) { +// +// throw new AssertionError("Not fully bound? : " + spo); +// +// } +// +// tmp[i] = spo; +// +// } +// +// /* +// * Note: When handling statement identifiers, we clone tmp[] to avoid a +// * side-effect on its order so that we can unify the assigned statement +// * identifiers below. 
+// * +// * Note: In order to report back the [ISPO#isModified()] flag, we also +// * need to clone tmp[] to avoid a side effect on its order. Therefore we +// * now always clone tmp[]. +// */ +//// final long nwritten = writeSPOs(sids ? tmp.clone() : tmp, numStmts); +// final long nwritten = database.removeStatements(tmp.clone(), numStmts); +// +// // Copy the state of the isModified() flag +// { +// +// for (int i = 0; i < numStmts; i++) { +// +// if (tmp[i].isModified()) { +// +// stmts[i].setModified(true); +// +// changeLog.changeEvent( +// new ChangeRecord(stmts[i], ChangeAction.REMOVED)); +// +// } +// +// } +// +// } +// +// return nwritten; +// +// } - for (int i = 0; i < tmp.length; i++) { - - final BigdataStatement stmt = stmts[i]; - - /* - * Note: context position is not passed when statement identifiers - * are in use since the statement identifier is assigned based on - * the {s,p,o} triple. - */ - - final SPO spo = new SPO(stmt); - - if (log.isDebugEnabled()) - log.debug("adding: " + stmt.toString() + " (" + spo + ")"); - - if(!spo.isFullyBound()) { - - throw new AssertionError("Not fully bound? : " + spo); - - } - - tmp[i] = spo; - - } - - /* - * Note: When handling statement identifiers, we clone tmp[] to avoid a - * side-effect on its order so that we can unify the assigned statement - * identifiers below. - * - * Note: In order to report back the [ISPO#isModified()] flag, we also - * need to clone tmp[] to avoid a side effect on its order. Therefore we - * now always clone tmp[]. - */ -// final long nwritten = writeSPOs(sids ? tmp.clone() : tmp, numStmts); - final long nwritten = database.removeStatements(tmp.clone(), numStmts); - - // Copy the state of the isModified() flag - { - - for (int i = 0; i < numStmts; i++) { - - if (tmp[i].isModified()) { - - stmts[i].setModified(true); - - changeLog.changeEvent( - new ChangeRecord(stmts[i], ChangeAction.REMOVED)); - - } - - } - - } - - return nwritten; - - } - public synchronized CloseableIteration<? extends Resource, SailException> getContextIDs() throws SailException { @@ -2695,7 +2703,9 @@ if(getTruthMaintenance()) { // do TM, writing on the database. - tm.assertAll((TempTripleStore)assertBuffer.getStatementStore()); + tm.assertAll( + (TempTripleStore)assertBuffer.getStatementStore(), + changeLog, bnodes2); // must be reallocated on demand. assertBuffer = null; @@ -2712,7 +2722,8 @@ if(getTruthMaintenance()) { // do TM, writing on the database. - tm.retractAll((TempTripleStore)retractBuffer.getStatementStore()); + tm.retractAll((TempTripleStore)retractBuffer.getStatementStore(), + changeLog, bnodes2); // must be re-allocated on demand. 
retractBuffer = null; Added: branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/StatementWriter.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/StatementWriter.java (rev 0) +++ branches/CHANGE_SET_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/StatementWriter.java 2010-10-06 02:34:14 UTC (rev 3736) @@ -0,0 +1,196 @@ +package com.bigdata.rdf.sail.changesets; + +import java.util.Iterator; +import java.util.Map; +import org.apache.log4j.Logger; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.sail.changesets.IChangeRecord.ChangeAction; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPO; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BigdataStatementIteratorImpl; +import com.bigdata.relation.accesspath.IElementFilter; +import com.bigdata.striterator.ChunkedArrayIterator; +import com.bigdata.striterator.IChunkedOrderedIterator; + +public class StatementWriter { + + protected static final Logger log = Logger.getLogger(StatementWriter.class); + + public static long addStatements(final AbstractTripleStore database, + final AbstractTripleStore statementStore, + final boolean copyOnly, + final IElementFilter<ISPO> filter, + final IChunkedOrderedIterator<ISPO> itr, + final IChangeLog changeLog, + final Map<IV, BigdataBNode> bnodes) { + + long n = 0; + + if (itr.hasNext()) { + + final BigdataStatementIteratorImpl itr2 = + new BigdataStatementIteratorImpl(database, bnodes, itr) + .start(database.getExecutorService()); + + final BigdataStatement[] stmts = + new BigdataStatement[database.getChunkCapacity()]; + + int i = 0; + while ((i = nextChunk(itr2, stmts)) > 0) { + n += addStatements(database, statementStore, copyOnly, filter, + stmts, i, changeLog); + } + + } + + return n; + + } + + private static long addStatements(final AbstractTripleStore database, + final AbstractTripleStore statementStore, + final boolean copyOnly, + final IElementFilter<ISPO> filter, + final BigdataStatement[] stmts, + final int numStmts, + final IChangeLog changeLog) { + + final SPO[] tmp = allocateSPOs(stmts, numStmts); + + final long n = database.addStatements(statementStore, copyOnly, + new ChunkedArrayIterator<ISPO>(numStmts, tmp.clone(), + null/* keyOrder */), filter); + + // Copy the state of the isModified() flag and notify changeLog + for (int i = 0; i < numStmts; i++) { + + if (tmp[i].isModified()) { + + stmts[i].setModified(true); + + changeLog.changeEvent( + new ChangeRecord(stmts[i], ChangeAction.ADDED)); + + } + + } + + return n; + + } + + public static long removeStatements(final AbstractTripleStore database, + final IChunkedOrderedIterator<ISPO> itr, + final boolean computeClosureForStatementIdentifiers, + final IChangeLog changeLog, + final Map<IV, BigdataBNode> bnodes) { + + long n = 0; + + if (itr.hasNext()) { + + final BigdataStatementIteratorImpl itr2 = + new BigdataStatementIteratorImpl(database, bnodes, itr) + .start(database.getExecutorService()); + + final BigdataStatement[] stmts = + new BigdataStatement[database.getChunkCapacity()]; + + int i = 0; + while ((i = nextChunk(itr2, stmts)) > 0) { + n += removeStatements(database, stmts, i, + computeClosureForStatementIdentifiers, changeLog); + } + + } + + return n; + + } + + private static long removeStatements(final AbstractTripleStore database, + 
final BigdataStatement[] stmts, + final int numStmts, + final boolean computeClosureForStatementIdentifiers, + final IChangeLog changeLog) { + + final SPO[] tmp = allocateSPOs(stmts, numStmts); + + final long n = database.removeStatements( + new ChunkedArrayIterator<ISPO>(numStmts, tmp.clone(), + null/* keyOrder */), + computeClosureForStatementIdentifiers); + + // Copy the state of the isModified() flag and notify changeLog + for (int i = 0; i < numStmts; i++) { + + if (tmp[i].isModified()) { + + stmts[i].setModified(true); + + changeLog.changeEvent( + new ChangeRecord(stmts[i], ChangeAction.REMOVED)); + + } + + } + + return n; + + } + + private static int nextChunk(final Iterator<BigdataStatement> itr, + final BigdataStatement[] stmts) { + + assert stmts != null && stmts.length > 0; + + int i = 0; + while (itr.hasNext()) { + stmts[i++] = itr.next(); + if (i == stmts.length) { + // stmts[] is full + return i; + } + } + + /* + * stmts[] is empty (i = 0) or partially + * full (i > 0 && i < stmts.length) + */ + return i; + + } + + private static SPO[] allocateSPOs(final BigdataStatement[] stmts, + final int numStmts) { + + final SPO[] tmp = new SPO[numStmts]; + + for (int i = 0; i < tmp.length; i++) { + + final BigdataStatement stmt = stmts[i]; + + final SPO spo = new SPO(stmt); + + if (log.isDebugEnabled()) + log.debug("writing: " + stmt.toString() + " (" + spo + ")"); + + if(!spo.isFullyBound()) { + + throw new AssertionError("Not fully bound? : " + spo); + + } + + tmp[i] = spo; + + } + + return tmp; + + + } + +} Modified: branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java 2010-10-05 20:30:37 UTC (rev 3735) +++ branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java 2010-10-06 02:34:14 UTC (rev 3736) @@ -292,7 +292,7 @@ } - public void testTruthMaintenance() throws Exception { + public void testTMAdd() throws Exception { final BigdataSail sail = getSail(getTriplesWithInference()); sail.initialize(); @@ -363,6 +363,113 @@ } + public void testTMRetract() throws Exception { + + final BigdataSail sail = getSail(getTriplesWithInference()); + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = + (BigdataSailRepositoryConnection) repo.getConnection(); + cxn.setAutoCommit(false); + + final TestChangeLog changeLog = new TestChangeLog(); + cxn.setChangeLog(changeLog); + + try { + + final BigdataValueFactory vf = (BigdataValueFactory) sail.getValueFactory(); + + final String ns = BD.NAMESPACE; + + final URI a = vf.createURI(ns+"A"); + final URI b = vf.createURI(ns+"B"); + final URI c = vf.createURI(ns+"C"); + + final BigdataStatement[] explicitAdd = new BigdataStatement[] { + vf.createStatement(a, RDFS.SUBCLASSOF, b), + vf.createStatement(b, RDFS.SUBCLASSOF, c), + }; + + final BigdataStatement[] inferredAdd = new BigdataStatement[] { + vf.createStatement(a, RDF.TYPE, RDFS.CLASS), + vf.createStatement(a, RDFS.SUBCLASSOF, RDFS.RESOURCE), + vf.createStatement(a, RDFS.SUBCLASSOF, a), + vf.createStatement(a, RDFS.SUBCLASSOF, c), + vf.createStatement(b, RDF.TYPE, RDFS.CLASS), + vf.createStatement(b, RDFS.SUBCLASSOF, RDFS.RESOURCE), + vf.createStatement(b, RDFS.SUBCLASSOF, b), + vf.createStatement(c, RDF.TYPE, RDFS.CLASS), + vf.createStatement(c, RDFS.SUBCLASSOF, RDFS.RESOURCE), + 
vf.createStatement(c, RDFS.SUBCLASSOF, c), + }; + + final BigdataStatement[] explicitRemove = new BigdataStatement[] { + vf.createStatement(b, RDFS.SUBCLASSOF, c), + }; + + final BigdataStatement[] inferredRemove = new BigdataStatement[] { + vf.createStatement(a, RDFS.SUBCLASSOF, c), + vf.createStatement(c, RDF.TYPE, RDFS.CLASS), + vf.createStatement(c, RDFS.SUBCLASSOF, RDFS.RESOURCE), + vf.createStatement(c, RDFS.SUBCLASSOF, c), + }; + +/**/ + cxn.setNamespace("ns", ns); + + for (BigdataStatement stmt : explicitAdd) { + cxn.add(stmt); + } + + cxn.commit();// + + { + + final Collection<IChangeRecord> expected = + new LinkedList<IChangeRecord>(); + for (BigdataStatement stmt : explicitAdd) { + expected.add(new ChangeRecord(stmt, ChangeAction.ADDED)); + } + for (BigdataStatement stmt : inferredAdd) { + expected.add(new ChangeRecord(stmt, ChangeAction.ADDED)); + } + + compare(expected, changeLog.getChangeSet()); + + } + + for (BigdataStatement stmt : explicitRemove) { + cxn.remove(stmt); + } + + cxn.commit();// + + { + + final Collection<IChangeRecord> expected = + new LinkedList<IChangeRecord>(); + for (BigdataStatement stmt : explicitRemove) { + expected.add(new ChangeRecord(stmt, ChangeAction.REMOVED)); + } + for (BigdataStatement stmt : inferredRemove) { + expected.add(new ChangeRecord(stmt, ChangeAction.REMOVED)); + } + + compare(expected, changeLog.getChangeSet()); + + } + + if (log.isDebugEnabled()) { + log.debug("\n" + sail.getDatabase().dumpStore(true, true, false)); + } + + } finally { + cxn.close(); + sail.__tearDownUnitTest(); + } + + } + private void compare(final Collection<IChangeRecord> expected, final Collection<IChangeRecord> actual) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
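Both new code paths in r3736 (the add side and the retract side) funnel through the same chunking idiom in the new sail-level StatementWriter: drain the wrapped statement iterator into a fixed-size BigdataStatement[] with nextChunk(), process each full or final partial chunk, and accumulate the mutation count. This is why the hand-rolled removeAndNotify() loop from r3729 could be commented out of BigdataSail above. Below is a self-contained sketch of that loop with the bigdata types swapped for plain Java generics; nextChunk mirrors the helper in the diff, while the ChunkProcessor callback is a hypothetical stand-in for addStatements/removeStatements.

{{{
import java.util.Arrays;
import java.util.Iterator;

public final class ChunkedDrain {

    /** Stand-in for the per-chunk database write the real code performs. */
    public interface ChunkProcessor<T> {
        long apply(T[] chunk, int len);
    }

    /**
     * Fill buf from itr. Returns buf.length for a full chunk, something
     * smaller for the final partial chunk, and 0 once the iterator is
     * exhausted -- the same contract as nextChunk() in the diff above.
     */
    public static <T> int nextChunk(final Iterator<? extends T> itr,
            final T[] buf) {
        int i = 0;
        while (i < buf.length && itr.hasNext()) {
            buf[i++] = itr.next();
        }
        return i;
    }

    /** Drives the chunk loop, accumulating the per-chunk mutation counts. */
    public static <T> long drain(final Iterator<? extends T> itr,
            final T[] buf, final ChunkProcessor<T> processor) {
        long n = 0;
        int i;
        while ((i = nextChunk(itr, buf)) > 0) {
            n += processor.apply(buf, i); // only buf[0..i-1] is valid
        }
        return n;
    }

    public static void main(final String[] args) {
        // Toy run: 10 items through a capacity-4 buffer -> chunks of 4, 4, 2.
        final Iterator<Integer> itr = Arrays.asList(
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9).iterator();
        final long n = drain(itr, new Integer[4],
                new ChunkProcessor<Integer>() {
                    public long apply(final Integer[] chunk, final int len) {
                        System.out.println("chunk of " + len);
                        return len;
                    }
                });
        System.out.println("processed " + n); // processed 10
    }
}
}}}

The real implementation additionally converts each BigdataStatement chunk back to an SPO[] (allocateSPOs), clones that array so the index write cannot reorder it out from under the caller, and then reads back the isModified() flags to decide which ChangeRecords to fire.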
From: <tho...@us...> - 2010-11-23 14:17:35
Revision: 3977 http://bigdata.svn.sourceforge.net/bigdata/?rev=3977&view=rev
Author: thompsonbry
Date: 2010-11-23 14:17:27 +0000 (Tue, 23 Nov 2010)
Log Message:
-----------
Merge from TRUNK to BRANCH (r3608:r3976).

merge -r3608:HEAD https://bigdata.svn.sourceforge.net/svnroot/bigdata/trunk /root/workspace/bigdata-change-set-branch

{{{
--- Merging r3608 through r3976 into /root/workspace/bigdata-change-set-branch
U /root/workspace/bigdata-change-set-branch/bigdata-perf/README.txt
U /root/workspace/bigdata-change-set-branch/build.xml
A /root/workspace/bigdata-change-set-branch/bigdata-compatibility
A /root/workspace/bigdata-change-set-branch/bigdata-compatibility/src
A /root/workspace/bigdata-change-set-branch/bigdata-compatibility/src/test
A /root/workspace/bigdata-change-set-branch/bigdata-compatibility/src/test/com
A /root/workspace/bigdata-change-set-branch/bigdata-compatibility/src/test/com/bigdata
A /root/workspace/bigdata-change-set-branch/bigdata-compatibility/src/test/com/bigdata/journal
A /root/workspace/bigdata-change-set-branch/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java
U /root/workspace/bigdata-change-set-branch/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java
A /root/workspace/bigdata-change-set-branch/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPORelation.java
U /root/workspace/bigdata-change-set-branch/bigdata-rdf/src/java/com/bigdata/rdf/vocab/BaseVocabulary.java
Merge complete.
===== File Statistics: =====
Added: 7
Updated: 4
}}}

This change set incorporates the following:
- BaseVocabulary: changed to use the as-generated serialVersionUID from the last release.
- TestSids: javadoc.
- bigdata-perf/README.txt: note concerning the use of the ant bundleJar target.
- build.xml: removed the jini dependencies from the Sesame WAR deployment target "install-sesame-server".
- bigdata-compatibility: some initial steps towards https://sourceforge.net/apps/trac/bigdata/ticket/171 (binary compatibility test suite).
Modified Paths: -------------- branches/CHANGE_SET_BRANCH/bigdata-perf/README.txt branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/vocab/BaseVocabulary.java branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java branches/CHANGE_SET_BRANCH/build.xml Added Paths: ----------- branches/CHANGE_SET_BRANCH/bigdata-compatibility/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java Removed Paths: ------------- branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java Deleted: branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java =================================================================== --- trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2010-11-22 21:12:22 UTC (rev 3976) +++ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2010-11-23 14:17:27 UTC (rev 3977) @@ -1,276 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ -/* - * Created on Nov 19, 2010 - */ -package com.bigdata.journal; - -import java.io.File; -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -import junit.framework.TestCase2; - -import com.bigdata.Banner; -import com.bigdata.btree.IIndex; -import com.bigdata.btree.IndexMetadata; - -/** - * Test suite for binary compatibility, portability, and forward compatibility - * or automated migration of persistent stores and persistence or serialization - * capable objects across different bigdata releases. The tests in this suite - * rely on artifacts which are archived within SVN. - * - * @todo create w/ small extent and truncate (RW store does not support - * truncate). - * - * @todo test binary migration and forward compatibility. - * - * @todo stubs to create and organize artifacts,etc. - * - * @todo data driven test suite? - * - * @todo create artifact for each release, name the artifacts systematically, - * e.g., test.release.(RW|WORM).jnl or test.release.seg. 
Collect a list of - * the created artifacts and run each test against each of the versions of - * the artifact. - * - * @todo Force artifact file name case for file system compatibility? - * - * @todo test journal (WORM and RW), btree, index segment, row store, persistent - * data structures (checkpoints, index metadata, tuple serializers, etc.), - * RDF layer, RMI message formats, etc. - * - * @todo Specific tests for - * <p> - * Name2Addr and DefaultKeyBuilderFactory portability problem. See - * https://sourceforge.net/apps/trac/bigdata/ticket/193 - * <p> - * WORM global row store resolution problem introduced in the - * JOURNAL_HA_BRANCH. See - * https://sourceforge.net/apps/trac/bigdata/ticket/171#comment:5 - * <p> - * Sparse row store JDK encoding problem: - * https://sourceforge.net/apps/trac/bigdata/ticket/107 - */ -public class TestBinaryCompatibility extends TestCase2 { - - /** - * - */ - public TestBinaryCompatibility() { - } - - /** - * @param name - */ - public TestBinaryCompatibility(String name) { - super(name); - } - - /** - * @todo munge the release version into a name that is compatibility with - * the file system ("." to "_"). Store artifacts at each release? At - * each release in which an incompatibility is introduced? At each - * release in which a persistence capable data structure or change is - * introduced? - */ - static protected final File artifactDir = new File( - "bigdata-compatibility/src/resources/artifacts"); - - protected static class Version { - private final String version; - private final String revision; - public Version(String version,String revision) { - this.version = version; - this.revision = revision; - } - - /** - * The bigdata version number associated with the release. This is in - * the form <code>xx.yy.zz</code> - */ - public String getVersion() { - return version; - } - - /** - * The SVN repository revision associated with the release. This is in - * the form <code>####</code>. - */ - public String getRevision() { - return revision; - } - } - - /** - * Known release versions. - */ - protected static Version V_0_83_2 = new Version("0.83.2", "3349"); - - /** - * Tested Versions. - */ - protected Version[] versions = new Version[] { - V_0_83_2 - }; - - protected void setUp() throws Exception { - - Banner.banner(); - - super.setUp(); - - if (!artifactDir.exists()) { - - if (!artifactDir.mkdirs()) { - - throw new IOException("Could not create: " + artifactDir); - - } - - } - - for (Version version : versions) { - - final File versionDir = new File(artifactDir, version.getVersion()); - - if (!versionDir.exists()) { - - if (!versionDir.mkdirs()) { - - throw new IOException("Could not create: " + versionDir); - - } - - } - - } - - } - - protected void tearDown() throws Exception { - - super.tearDown(); - - } - - /** - * @throws Throwable - * - * @todo Each 'test' should run an instance of a class which knows how to - * create the appropriate artifacts and how to test them. 
- */ - public void test_WORM_compatibility_with_JOURNAL_HA_BRANCH() - throws Throwable { - - final Version version = V_0_83_2; - - final File versionDir = new File(artifactDir, version.getVersion()); - - final File artifactFile = new File(versionDir, getName() - + BufferMode.DiskWORM + Journal.Options.JNL); - - if (!artifactFile.exists()) { - - createArtifact(artifactFile); - - } - - verifyArtifact(artifactFile); - - } - - protected void createArtifact(final File artifactFile) throws Throwable { - - if (log.isInfoEnabled()) - log.info("Creating: " + artifactFile); - - final Properties properties = new Properties(); - - properties.setProperty(Journal.Options.FILE, artifactFile.toString()); - - properties.setProperty(Journal.Options.INITIAL_EXTENT, "" - + Journal.Options.minimumInitialExtent); - - final Journal journal = new Journal(properties); - - try { - - final IndexMetadata md = new IndexMetadata(UUID.randomUUID()); - - final IIndex ndx = journal.registerIndex("kb.spo.SPO", md); - - ndx.insert(1,1); - - journal.commit(); - - // reduce to minimum footprint. - journal.truncate(); - - } catch (Throwable t) { - - journal.destroy(); - - throw new RuntimeException(t); - - } finally { - - if (journal.isOpen()) - journal.close(); - - } - - } - - protected void verifyArtifact(final File artifactFile) throws Throwable { - - if (log.isInfoEnabled()) - log.info("Verifying: " + artifactFile); - - final Properties properties = new Properties(); - - properties.setProperty(Journal.Options.FILE, artifactFile.toString()); - - final Journal journal = new Journal(properties); - - try { - - final IIndex ndx = journal.getIndex("kb.spo.SPO"); - - assertNotNull(ndx); - - assertEquals(1,ndx.lookup(1)); - - } finally { - - journal.close(); - - } - - } - -} Copied: branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java (from rev 3976, trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java) =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java (rev 0) +++ branches/CHANGE_SET_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2010-11-23 14:17:27 UTC (rev 3977) @@ -0,0 +1,276 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Nov 19, 2010 + */ +package com.bigdata.journal; + +import java.io.File; +import java.io.IOException; +import java.util.Properties; +import java.util.UUID; + +import junit.framework.TestCase2; + +import com.bigdata.Banner; +import com.bigdata.btree.IIndex; +import com.bigdata.btree.IndexMetadata; + +/** + * Test suite for binary compatibility, portability, and forward compatibility + * or automated migration of persistent stores and persistence or serialization + * capable objects across different bigdata releases. The tests in this suite + * rely on artifacts which are archived within SVN. + * + * @todo create w/ small extent and truncate (RW store does not support + * truncate). + * + * @todo test binary migration and forward compatibility. + * + * @todo stubs to create and organize artifacts,etc. + * + * @todo data driven test suite? + * + * @todo create artifact for each release, name the artifacts systematically, + * e.g., test.release.(RW|WORM).jnl or test.release.seg. Collect a list of + * the created artifacts and run each test against each of the versions of + * the artifact. + * + * @todo Force artifact file name case for file system compatibility? + * + * @todo test journal (WORM and RW), btree, index segment, row store, persistent + * data structures (checkpoints, index metadata, tuple serializers, etc.), + * RDF layer, RMI message formats, etc. + * + * @todo Specific tests for + * <p> + * Name2Addr and DefaultKeyBuilderFactory portability problem. See + * https://sourceforge.net/apps/trac/bigdata/ticket/193 + * <p> + * WORM global row store resolution problem introduced in the + * JOURNAL_HA_BRANCH. See + * https://sourceforge.net/apps/trac/bigdata/ticket/171#comment:5 + * <p> + * Sparse row store JDK encoding problem: + * https://sourceforge.net/apps/trac/bigdata/ticket/107 + */ +public class TestBinaryCompatibility extends TestCase2 { + + /** + * + */ + public TestBinaryCompatibility() { + } + + /** + * @param name + */ + public TestBinaryCompatibility(String name) { + super(name); + } + + /** + * @todo munge the release version into a name that is compatibility with + * the file system ("." to "_"). Store artifacts at each release? At + * each release in which an incompatibility is introduced? At each + * release in which a persistence capable data structure or change is + * introduced? + */ + static protected final File artifactDir = new File( + "bigdata-compatibility/src/resources/artifacts"); + + protected static class Version { + private final String version; + private final String revision; + public Version(String version,String revision) { + this.version = version; + this.revision = revision; + } + + /** + * The bigdata version number associated with the release. This is in + * the form <code>xx.yy.zz</code> + */ + public String getVersion() { + return version; + } + + /** + * The SVN repository revision associated with the release. This is in + * the form <code>####</code>. + */ + public String getRevision() { + return revision; + } + } + + /** + * Known release versions. + */ + protected static Version V_0_83_2 = new Version("0.83.2", "3349"); + + /** + * Tested Versions. 
+ */ + protected Version[] versions = new Version[] { + V_0_83_2 + }; + + protected void setUp() throws Exception { + + Banner.banner(); + + super.setUp(); + + if (!artifactDir.exists()) { + + if (!artifactDir.mkdirs()) { + + throw new IOException("Could not create: " + artifactDir); + + } + + } + + for (Version version : versions) { + + final File versionDir = new File(artifactDir, version.getVersion()); + + if (!versionDir.exists()) { + + if (!versionDir.mkdirs()) { + + throw new IOException("Could not create: " + versionDir); + + } + + } + + } + + } + + protected void tearDown() throws Exception { + + super.tearDown(); + + } + + /** + * @throws Throwable + * + * @todo Each 'test' should run an instance of a class which knows how to + * create the appropriate artifacts and how to test them. + */ + public void test_WORM_compatibility_with_JOURNAL_HA_BRANCH() + throws Throwable { + + final Version version = V_0_83_2; + + final File versionDir = new File(artifactDir, version.getVersion()); + + final File artifactFile = new File(versionDir, getName() + + BufferMode.DiskWORM + Journal.Options.JNL); + + if (!artifactFile.exists()) { + + createArtifact(artifactFile); + + } + + verifyArtifact(artifactFile); + + } + + protected void createArtifact(final File artifactFile) throws Throwable { + + if (log.isInfoEnabled()) + log.info("Creating: " + artifactFile); + + final Properties properties = new Properties(); + + properties.setProperty(Journal.Options.FILE, artifactFile.toString()); + + properties.setProperty(Journal.Options.INITIAL_EXTENT, "" + + Journal.Options.minimumInitialExtent); + + final Journal journal = new Journal(properties); + + try { + + final IndexMetadata md = new IndexMetadata(UUID.randomUUID()); + + final IIndex ndx = journal.registerIndex("kb.spo.SPO", md); + + ndx.insert(1,1); + + journal.commit(); + + // reduce to minimum footprint. + journal.truncate(); + + } catch (Throwable t) { + + journal.destroy(); + + throw new RuntimeException(t); + + } finally { + + if (journal.isOpen()) + journal.close(); + + } + + } + + protected void verifyArtifact(final File artifactFile) throws Throwable { + + if (log.isInfoEnabled()) + log.info("Verifying: " + artifactFile); + + final Properties properties = new Properties(); + + properties.setProperty(Journal.Options.FILE, artifactFile.toString()); + + final Journal journal = new Journal(properties); + + try { + + final IIndex ndx = journal.getIndex("kb.spo.SPO"); + + assertNotNull(ndx); + + assertEquals(1,ndx.lookup(1)); + + } finally { + + journal.close(); + + } + + } + +} Modified: branches/CHANGE_SET_BRANCH/bigdata-perf/README.txt =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-perf/README.txt 2010-11-22 21:12:22 UTC (rev 3976) +++ branches/CHANGE_SET_BRANCH/bigdata-perf/README.txt 2010-11-23 14:17:27 UTC (rev 3977) @@ -1,2 +1,6 @@ This module contains drivers for a variety of data sets and benchmarks used as -part of a performance test suite. \ No newline at end of file +part of a performance test suite. + +Note: You must run "ant bundleJar" in the top-level directory first. This will +build the bigdata code base and bundle together the various dependencies so they +will be available for the ant scripts in this module. 
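The @todo items in TestBinaryCompatibility above ask for a data-driven suite which runs every archived release against every store mode using systematic artifact names. A hypothetical sketch of that loop, reusing the versions array and the createArtifact()/verifyArtifact() helpers shown above; the naming scheme and the DiskRW constant are assumptions, and createArtifact() would additionally have to set the buffer mode property for the non-default case:

{{{
// Hypothetical data-driven variant: one pass per (version, mode) pair.
for (Version version : versions) {

    for (BufferMode mode : new BufferMode[] { BufferMode.DiskWORM,
            BufferMode.DiskRW }) { // DiskRW constant assumed here.

        final File versionDir = new File(artifactDir, version.getVersion());

        // Systematic artifact name, e.g. "test.0_83_2.DiskWORM.jnl",
        // with "." munged to "_" for file system compatibility.
        final File artifactFile = new File(versionDir, "test."
                + version.getVersion().replace('.', '_') + "." + mode
                + Journal.Options.JNL);

        if (!artifactFile.exists()) {

            createArtifact(artifactFile);

        }

        verifyArtifact(artifactFile);

    }

}
}}}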
Modified: branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/vocab/BaseVocabulary.java
===================================================================
--- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/vocab/BaseVocabulary.java	2010-11-22 21:12:22 UTC (rev 3976)
+++ branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/vocab/BaseVocabulary.java	2010-11-23 14:17:27 UTC (rev 3977)
@@ -65,6 +65,11 @@
     final static public Logger log = Logger.getLogger(BaseVocabulary.class);
 
     /**
+     * The serialVersionUID as reported by the trunk on Oct 6, 2010.
+     */
+    private static final long serialVersionUID = 1560142397515291331L;
+
+    /**
      * The database that is the authority for the defined terms and term
      * identifiers. This will be <code>null</code> when the de-serialization
      * ctor is used.

Modified: branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java
===================================================================
--- branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java	2010-11-22 21:12:22 UTC (rev 3976)
+++ branches/CHANGE_SET_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java	2010-11-23 14:17:27 UTC (rev 3977)
@@ -44,6 +44,8 @@
 import com.bigdata.rdf.vocab.NoVocabulary;
 
 /**
+ * Test case for reverse lookup from SID to statement.
+ *
  * @author <a href="mailto:mrp...@us...">Mike Personick</a>
  * @version $Id$
  */

Modified: branches/CHANGE_SET_BRANCH/build.xml
===================================================================
--- branches/CHANGE_SET_BRANCH/build.xml	2010-11-22 21:12:22 UTC (rev 3976)
+++ branches/CHANGE_SET_BRANCH/build.xml	2010-11-23 14:17:27 UTC (rev 3977)
@@ -1992,10 +1992,12 @@
 			<fileset dir="${bigdata.dir}/bigdata/lib">
 				<include name="**/*.jar" />
 			</fileset>
+<!-- Jini should not be required for the Sesame WAR.
 			<fileset dir="${bigdata.dir}/bigdata-jini/lib/jini/lib">
 				<include name="jini-core.jar" />
 				<include name="jini-ext.jar" />
 			</fileset>
+ -->
 		</copy>
 
 		<!-- copy resources to Workbench webapp. -->
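Distilled from the createArtifact()/verifyArtifact() pair in TestBinaryCompatibility above, the essential round trip behind the compatibility check is: write an index into a journal file, commit, reopen the same file, and read the tuple back. A self-contained sketch using only the Journal calls shown in the test; the file name, class name, and index name are illustrative only:

{{{
import java.io.File;
import java.util.Properties;
import java.util.UUID;

import com.bigdata.btree.IIndex;
import com.bigdata.btree.IndexMetadata;
import com.bigdata.journal.Journal;

public class JournalRoundTrip {

    public static void main(final String[] args) {

        final Properties properties = new Properties();

        // Illustrative file name; the test derives it from the release
        // version and the buffer mode instead.
        properties.setProperty(Journal.Options.FILE,
                new File("artifact.jnl").toString());

        // Create: register an index, write one tuple, commit.
        Journal journal = new Journal(properties);
        try {
            final IIndex ndx = journal.registerIndex("kb.spo.SPO",
                    new IndexMetadata(UUID.randomUUID()));
            ndx.insert(1, 1);
            journal.commit();
        } finally {
            journal.close();
        }

        // Verify: reopen against the same file and read the tuple back.
        journal = new Journal(properties);
        try {
            final IIndex ndx = journal.getIndex("kb.spo.SPO");
            if (ndx == null || !Integer.valueOf(1).equals(ndx.lookup(1)))
                throw new AssertionError("Round trip failed.");
        } finally {
            journal.close();
        }

    }

}
}}}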