From: <tho...@us...> - 2010-11-23 15:22:37
Revision: 3979
          http://bigdata.svn.sourceforge.net/bigdata/?rev=3979&view=rev
Author:   thompsonbry
Date:     2010-11-23 15:22:27 +0000 (Tue, 23 Nov 2010)

Log Message:
-----------
Merge CHANGE_SET_BRANCH to trunk [r3608:HEAD].

Note: There is a problem in TestChangeSets when run with TestBigdataSailWithQuads. The test needs to be modified so that it does not run the TM test variants when in quads mode. https://sourceforge.net/apps/trac/bigdata/ticket/166 has been amended to note this issue, which will be fixed in the trunk.

Modified Paths:
--------------
    trunk/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java
    trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexRemover.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriteProc.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriter.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/StatementWriter.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java
    trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java
    trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java
    trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java
    trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java

Added Paths:
-----------
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexMutation.java
    trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/
    trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java

Removed Paths:
-------------
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java

Modified: trunk/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java	2010-11-23 14:27:09 UTC (rev 3978)
+++ trunk/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java	2010-11-23 15:22:27 UTC (rev 3979)
@@ -39,6 +39,7 @@
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import java.io.OutputStream;
+import java.util.Arrays;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -795,18 +796,34 @@
IResultHandler<ResultBitBuffer, ResultBitBuffer> { private final boolean[] results; + + /** + * I added this so I could encode information about tuple modification + * that takes more than one boolean to encode. For example, SPOs can + * be: INSERTED, REMOVED, UPDATED, NO_OP (2 bits). + */ + private final int multiplier; + private final AtomicInteger onCount = new AtomicInteger(); public ResultBitBufferHandler(final int nkeys) { + + this(nkeys, 1); + + } + + public ResultBitBufferHandler(final int nkeys, final int multiplier) { - results = new boolean[nkeys]; + results = new boolean[nkeys*multiplier]; + this.multiplier = multiplier; } public void aggregate(final ResultBitBuffer result, final Split split) { - System.arraycopy(result.getResult(), 0, results, split.fromIndex, - split.ntuples); + System.arraycopy(result.getResult(), 0, results, + split.fromIndex*multiplier, + split.ntuples*multiplier); onCount.addAndGet(result.getOnCount()); Modified: trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -144,21 +144,21 @@ final private UUID dataServiceUUID = UUID.randomUUID(); - @Override +// @Override public IBigdataFederation getFederation() { return fed; } - @Override +// @Override public DataService getDataService() { throw new UnsupportedOperationException(); } - @Override +// @Override public UUID getDataServiceUUID() { return dataServiceUUID; Deleted: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -1,98 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Comparator; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.spo.SPOComparator; - -public class ChangeRecord implements IChangeRecord { - - private final ISPO stmt; - - private final ChangeAction action; - -// private final StatementEnum oldType; - - public ChangeRecord(final ISPO stmt, final ChangeAction action) { - -// this(stmt, action, null); -// -// } -// -// public ChangeRecord(final BigdataStatement stmt, final ChangeAction action, -// final StatementEnum oldType) { -// - this.stmt = stmt; - this.action = action; -// this.oldType = oldType; - - } - - public ChangeAction getChangeAction() { - - return action; - - } - -// public StatementEnum getOldStatementType() { -// -// return oldType; -// -// } - - public ISPO getStatement() { - - return stmt; - - } - - @Override - public boolean equals(Object o) { - - if (o == this) - return true; - - if (o == null || o instanceof IChangeRecord == false) - return false; - - final IChangeRecord rec = (IChangeRecord) o; - - final ISPO stmt2 = rec.getStatement(); - - // statements are equal - if (stmt == stmt2 || - (stmt != null && stmt2 != null && stmt.equals(stmt2))) { - - // actions are equal - return action == rec.getChangeAction(); - - } - - return false; - - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - - sb.append(action).append(": ").append(stmt); - - return 
sb.toString(); - - } - - public static final Comparator<IChangeRecord> COMPARATOR = - new Comparator<IChangeRecord>() { - - public int compare(final IChangeRecord r1, final IChangeRecord r2) { - - final ISPO spo1 = r1.getStatement(); - final ISPO spo2 = r2.getStatement(); - - return SPOComparator.INSTANCE.compare(spo1, spo2); - - } - - }; - -} Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java (from rev 3978, branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -0,0 +1,98 @@ +package com.bigdata.rdf.changesets; + +import java.util.Comparator; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPOComparator; + +public class ChangeRecord implements IChangeRecord { + + private final ISPO stmt; + + private final ChangeAction action; + +// private final StatementEnum oldType; + + public ChangeRecord(final ISPO stmt, final ChangeAction action) { + +// this(stmt, action, null); +// +// } +// +// public ChangeRecord(final BigdataStatement stmt, final ChangeAction action, +// final StatementEnum oldType) { +// + this.stmt = stmt; + this.action = action; +// this.oldType = oldType; + + } + + public ChangeAction getChangeAction() { + + return action; + + } + +// public StatementEnum getOldStatementType() { +// +// return oldType; +// +// } + + public ISPO getStatement() { + + return stmt; + + } + + @Override + public boolean equals(Object o) { + + if (o == this) + return true; + + if (o == null || o instanceof IChangeRecord == false) + return false; + + final IChangeRecord rec = (IChangeRecord) o; + + final ISPO stmt2 = rec.getStatement(); + + // statements are equal + if (stmt == stmt2 || + (stmt != null && stmt2 != null && stmt.equals(stmt2))) { + + // actions are equal + return action == rec.getChangeAction(); + + } + + return false; + + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append(action).append(": ").append(stmt); + + return sb.toString(); + + } + + public static final Comparator<IChangeRecord> COMPARATOR = + new Comparator<IChangeRecord>() { + + public int compare(final IChangeRecord r1, final IChangeRecord r2) { + + final ISPO spo1 = r1.getStatement(); + final ISPO spo2 = r2.getStatement(); + + return SPOComparator.INSTANCE.compare(spo1, spo2); + + } + + }; + +} Deleted: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -1,38 +0,0 @@ -package com.bigdata.rdf.changesets; - -/** - * Provides detailed information on changes made to statements in the database. - * Change records are generated for any statements that are used in - * addStatement() or removeStatements() operations on the SAIL connection, as - * well as any inferred statements that are added or removed as a result of - * truth maintenance when the database has inference enabled. Change records - * will be sent to an instance of this class via the - * {@link #changeEvent(IChangeRecord)} method. 
These events will - * occur on an ongoing basis as statements are added to or removed from the - * indices. It is the change log's responsibility to collect change records. - * When the transaction is actually committed (or aborted), the change log will - * receive notification via {@link #transactionCommited()} or - * {@link #transactionAborted()}. - */ -public interface IChangeLog { - - /** - * Occurs when a statement add or remove is flushed to the indices (but - * not yet committed). - * - * @param record - * the {@link IChangeRecord} - */ - void changeEvent(final IChangeRecord record); - - /** - * Occurs when the current SAIL transaction is committed. - */ - void transactionCommited(); - - /** - * Occurs if the current SAIL transaction is aborted. - */ - void transactionAborted(); - -} Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java (from rev 3978, branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -0,0 +1,38 @@ +package com.bigdata.rdf.changesets; + +/** + * Provides detailed information on changes made to statements in the database. + * Change records are generated for any statements that are used in + * addStatement() or removeStatements() operations on the SAIL connection, as + * well as any inferred statements that are added or removed as a result of + * truth maintenance when the database has inference enabled. Change records + * will be sent to an instance of this class via the + * {@link #changeEvent(IChangeRecord)} method. These events will + * occur on an ongoing basis as statements are added to or removed from the + * indices. It is the change log's responsibility to collect change records. + * When the transaction is actually committed (or aborted), the change log will + * receive notification via {@link #transactionCommited()} or + * {@link #transactionAborted()}. + */ +public interface IChangeLog { + + /** + * Occurs when a statement add or remove is flushed to the indices (but + * not yet committed). + * + * @param record + * the {@link IChangeRecord} + */ + void changeEvent(final IChangeRecord record); + + /** + * Occurs when the current SAIL transaction is committed. + */ + void transactionCommited(); + + /** + * Occurs if the current SAIL transaction is aborted. + */ + void transactionAborted(); + +} Deleted: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -1,120 +0,0 @@ -package com.bigdata.rdf.changesets; - -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.model.StatementEnum; -import com.bigdata.rdf.spo.ISPO; - -/** - * Provides detailed information on changes made to statements in the database. - * Change records are generated for any statements that are used in - * addStatement() or removeStatements() operations on the SAIL connection, as - * well as any inferred statements that are added or removed as a result of - * truth maintenance when the database has inference enabled. 
- * <p> - * See {@link IChangeLog}. - */ -public interface IChangeRecord { - - /** - * Attempting to add or remove statements can have a number of different - * effects. This enum captures the different actions that can take place as - * a result of trying to add or remove a statement from the database. - */ - public enum ChangeAction { - - /** - * The focus statement was not in the database before and will be - * in the database after the commit. This can be the result of either - * explicit addStatement() operations on the SAIL connection, or from - * new inferences being generated via truth maintenance when the - * database has inference enabled. If the focus statement has a - * statement type of explicit then it was added via an addStatement() - * operation. If the focus statement has a statement type of inferred - * then it was added via truth maintenance. - */ - INSERTED, - - /** - * The focus statement was in the database before and will not - * be in the database after the commit. When the database has inference - * and truth maintenance enabled, the statement that is the focus of - * this change record was either an explicit statement that was the - * subject of a removeStatements() operation on the connection, or it - * was an inferred statement that was removed as a result of truth - * maintenance. Either way, the statement is no longer provable as an - * inference using other statements still in the database after the - * commit. If it were still provable, the explicit statement would have - * had its type changed to inferred, and the inferred statement would - * have remained untouched by truth maintenance. If an inferred - * statement was the subject of a removeStatement() operation on the - * connection it would have resulted in a no-op, since inferences can - * only be removed via truth maintenance. - */ - REMOVED, - - /** - * This change action can only occur when inference and truth - * maintenance are enabled on the database. Sometimes an attempt at - * statement addition or removal via an addStatement() or - * removeStatements() operation on the connection will result in a type - * change rather than an actual assertion or deletion. When in - * inference mode, statements can have one of three statement types: - * explicit, inferred, or axiom (see {@link StatementEnum}). There are - * several reasons why a statement will change type rather than be - * asserted or deleted: - * <p> - * <ul> - * <li> A statement is asserted, but already exists in the database as - * an inference or an axiom. The existing statement will have its type - * changed from inference or axiom to explicit. </li> - * <li> An explicit statement is retracted, but is still provable by - * other means. It will have its type changed from explicit to - * inference. </li> - * <li> An explicit statement is retracted, but is one of the axioms - * needed for inference. It will have its type changed from explicit to - * axiom. </li> - * </ul> - */ - UPDATED, - -// /** -// * This change action can occur for one of two reasons: -// * <p> -// * <ul> -// * <li> A statement is asserted, but already exists in the database as -// * an explicit statement. </li> -// * <li> An inferred statement or an axiom is retracted. Only explicit -// * statements can be retracted via removeStatements() operations. </li> -// * </ul> -// */ -// NO_OP - - } - - /** - * Return the ISPO that is the focus of this change record. 
- * - * @return - * the {@link ISPO} - */ - ISPO getStatement(); - - /** - * Return the change action for this change record. - * - * @return - * the {@link ChangeAction} - */ - ChangeAction getChangeAction(); - -// /** -// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method -// * will return the old statement type of the focus statement. The -// * new statement type is available on the focus statement itself. -// * -// * @return -// * the old statement type of the focus statement -// */ -// StatementEnum getOldStatementType(); - -} Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java (from rev 3978, branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -0,0 +1,120 @@ +package com.bigdata.rdf.changesets; + +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.spo.ISPO; + +/** + * Provides detailed information on changes made to statements in the database. + * Change records are generated for any statements that are used in + * addStatement() or removeStatements() operations on the SAIL connection, as + * well as any inferred statements that are added or removed as a result of + * truth maintenance when the database has inference enabled. + * <p> + * See {@link IChangeLog}. + */ +public interface IChangeRecord { + + /** + * Attempting to add or remove statements can have a number of different + * effects. This enum captures the different actions that can take place as + * a result of trying to add or remove a statement from the database. + */ + public enum ChangeAction { + + /** + * The focus statement was not in the database before and will be + * in the database after the commit. This can be the result of either + * explicit addStatement() operations on the SAIL connection, or from + * new inferences being generated via truth maintenance when the + * database has inference enabled. If the focus statement has a + * statement type of explicit then it was added via an addStatement() + * operation. If the focus statement has a statement type of inferred + * then it was added via truth maintenance. + */ + INSERTED, + + /** + * The focus statement was in the database before and will not + * be in the database after the commit. When the database has inference + * and truth maintenance enabled, the statement that is the focus of + * this change record was either an explicit statement that was the + * subject of a removeStatements() operation on the connection, or it + * was an inferred statement that was removed as a result of truth + * maintenance. Either way, the statement is no longer provable as an + * inference using other statements still in the database after the + * commit. If it were still provable, the explicit statement would have + * had its type changed to inferred, and the inferred statement would + * have remained untouched by truth maintenance. If an inferred + * statement was the subject of a removeStatement() operation on the + * connection it would have resulted in a no-op, since inferences can + * only be removed via truth maintenance. + */ + REMOVED, + + /** + * This change action can only occur when inference and truth + * maintenance are enabled on the database. 
Sometimes an attempt at + * statement addition or removal via an addStatement() or + * removeStatements() operation on the connection will result in a type + * change rather than an actual assertion or deletion. When in + * inference mode, statements can have one of three statement types: + * explicit, inferred, or axiom (see {@link StatementEnum}). There are + * several reasons why a statement will change type rather than be + * asserted or deleted: + * <p> + * <ul> + * <li> A statement is asserted, but already exists in the database as + * an inference or an axiom. The existing statement will have its type + * changed from inference or axiom to explicit. </li> + * <li> An explicit statement is retracted, but is still provable by + * other means. It will have its type changed from explicit to + * inference. </li> + * <li> An explicit statement is retracted, but is one of the axioms + * needed for inference. It will have its type changed from explicit to + * axiom. </li> + * </ul> + */ + UPDATED, + +// /** +// * This change action can occur for one of two reasons: +// * <p> +// * <ul> +// * <li> A statement is asserted, but already exists in the database as +// * an explicit statement. </li> +// * <li> An inferred statement or an axiom is retracted. Only explicit +// * statements can be retracted via removeStatements() operations. </li> +// * </ul> +// */ +// NO_OP + + } + + /** + * Return the ISPO that is the focus of this change record. + * + * @return + * the {@link ISPO} + */ + ISPO getStatement(); + + /** + * Return the change action for this change record. + * + * @return + * the {@link ChangeAction} + */ + ChangeAction getChangeAction(); + +// /** +// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method +// * will return the old statement type of the focus statement. The +// * new statement type is available on the focus statement itself. +// * +// * @return +// * the old statement type of the focus statement +// */ +// StatementEnum getOldStatementType(); + +} Deleted: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -1,163 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.Map; -import org.apache.log4j.Logger; -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.rdf.store.BigdataStatementIterator; -import com.bigdata.striterator.ChunkedArrayIterator; - -/** - * This is a very simple implementation of a change log. NOTE: This is not - * a particularly great implementation. First of all it ends up storing - * two copies of the change set. Secondly it needs to be smarter about - * concurrency, or maybe we can be smart about it when we do the - * implementation on the other side (the SAIL connection can just write - * change events to a buffer and then the buffer can be drained by - * another thread that doesn't block the actual read/write operations, - * although then we need to be careful not to issue the committed() - * notification before the buffer is drained). 
- * - * @author mike - * - */ -public class InMemChangeLog implements IChangeLog { - - protected static final Logger log = Logger.getLogger(InMemChangeLog.class); - - /** - * Running tally of new changes since the last commit notification. - */ - private final Map<ISPO,IChangeRecord> changeSet = - new HashMap<ISPO, IChangeRecord>(); - - /** - * Keep a record of the change set as of the last commit. - */ - private final Map<ISPO,IChangeRecord> committed = - new HashMap<ISPO, IChangeRecord>(); - - /** - * See {@link IChangeLog#changeEvent(IChangeRecord)}. - */ - public synchronized void changeEvent(final IChangeRecord record) { - - if (log.isInfoEnabled()) - log.info(record); - - changeSet.put(record.getStatement(), record); - - } - - /** - * See {@link IChangeLog#transactionCommited()}. - */ - public synchronized void transactionCommited() { - - if (log.isInfoEnabled()) - log.info("transaction committed"); - - committed.clear(); - - committed.putAll(changeSet); - - changeSet.clear(); - - } - - /** - * See {@link IChangeLog#transactionAborted()}. - */ - public synchronized void transactionAborted() { - - if (log.isInfoEnabled()) - log.info("transaction aborted"); - - changeSet.clear(); - - } - - /** - * Return the change set as of the last commmit point. - * - * @return - * a collection of {@link IChangeRecord}s as of the last commit - * point - */ - public Collection<IChangeRecord> getLastCommit() { - - return committed.values(); - - } - - /** - * Return the change set as of the last commmit point, using the supplied - * database to resolve ISPOs to BigdataStatements. - * - * @return - * a collection of {@link IChangeRecord}s as of the last commit - * point - */ - public Collection<IChangeRecord> getLastCommit(final AbstractTripleStore db) { - - return resolve(db, committed.values()); - - } - - /** - * Use the supplied database to turn a set of ISPO change records into - * BigdataStatement change records. BigdataStatements also implement - * ISPO, the difference being that BigdataStatements also contain - * materialized RDF terms for the 3 (or 4) positions, in addition to just - * the internal identifiers (IVs) for those terms. 
- * - * @param db - * the database containing the lexicon needed to materialize - * the BigdataStatement objects - * @param unresolved - * the ISPO change records that came from IChangeLog notification - * events - * @return - * the fully resolves BigdataStatement change records - */ - private Collection<IChangeRecord> resolve(final AbstractTripleStore db, - final Collection<IChangeRecord> unresolved) { - - final Collection<IChangeRecord> resolved = - new LinkedList<IChangeRecord>(); - - // collect up the ISPOs out of the unresolved change records - final ISPO[] spos = new ISPO[unresolved.size()]; - int i = 0; - for (IChangeRecord rec : unresolved) { - spos[i++] = rec.getStatement(); - } - - // use the database to resolve them into BigdataStatements - final BigdataStatementIterator it = - db.asStatementIterator( - new ChunkedArrayIterator<ISPO>(i, spos, null/* keyOrder */)); - - /* - * the BigdataStatementIterator will produce BigdataStatement objects - * in the same order as the original ISPO array - */ - for (IChangeRecord rec : unresolved) { - - final BigdataStatement stmt = it.next(); - - resolved.add(new ChangeRecord(stmt, rec.getChangeAction())); - - } - - return resolved; - - } - - - -} Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java (from rev 3978, branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -0,0 +1,163 @@ +package com.bigdata.rdf.changesets; + +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; +import org.apache.log4j.Logger; +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BigdataStatementIterator; +import com.bigdata.striterator.ChunkedArrayIterator; + +/** + * This is a very simple implementation of a change log. NOTE: This is not + * a particularly great implementation. First of all it ends up storing + * two copies of the change set. Secondly it needs to be smarter about + * concurrency, or maybe we can be smart about it when we do the + * implementation on the other side (the SAIL connection can just write + * change events to a buffer and then the buffer can be drained by + * another thread that doesn't block the actual read/write operations, + * although then we need to be careful not to issue the committed() + * notification before the buffer is drained). + * + * @author mike + * + */ +public class InMemChangeLog implements IChangeLog { + + protected static final Logger log = Logger.getLogger(InMemChangeLog.class); + + /** + * Running tally of new changes since the last commit notification. + */ + private final Map<ISPO,IChangeRecord> changeSet = + new HashMap<ISPO, IChangeRecord>(); + + /** + * Keep a record of the change set as of the last commit. + */ + private final Map<ISPO,IChangeRecord> committed = + new HashMap<ISPO, IChangeRecord>(); + + /** + * See {@link IChangeLog#changeEvent(IChangeRecord)}. + */ + public synchronized void changeEvent(final IChangeRecord record) { + + if (log.isInfoEnabled()) + log.info(record); + + changeSet.put(record.getStatement(), record); + + } + + /** + * See {@link IChangeLog#transactionCommited()}. 
+ */ + public synchronized void transactionCommited() { + + if (log.isInfoEnabled()) + log.info("transaction committed"); + + committed.clear(); + + committed.putAll(changeSet); + + changeSet.clear(); + + } + + /** + * See {@link IChangeLog#transactionAborted()}. + */ + public synchronized void transactionAborted() { + + if (log.isInfoEnabled()) + log.info("transaction aborted"); + + changeSet.clear(); + + } + + /** + * Return the change set as of the last commmit point. + * + * @return + * a collection of {@link IChangeRecord}s as of the last commit + * point + */ + public Collection<IChangeRecord> getLastCommit() { + + return committed.values(); + + } + + /** + * Return the change set as of the last commmit point, using the supplied + * database to resolve ISPOs to BigdataStatements. + * + * @return + * a collection of {@link IChangeRecord}s as of the last commit + * point + */ + public Collection<IChangeRecord> getLastCommit(final AbstractTripleStore db) { + + return resolve(db, committed.values()); + + } + + /** + * Use the supplied database to turn a set of ISPO change records into + * BigdataStatement change records. BigdataStatements also implement + * ISPO, the difference being that BigdataStatements also contain + * materialized RDF terms for the 3 (or 4) positions, in addition to just + * the internal identifiers (IVs) for those terms. + * + * @param db + * the database containing the lexicon needed to materialize + * the BigdataStatement objects + * @param unresolved + * the ISPO change records that came from IChangeLog notification + * events + * @return + * the fully resolves BigdataStatement change records + */ + private Collection<IChangeRecord> resolve(final AbstractTripleStore db, + final Collection<IChangeRecord> unresolved) { + + final Collection<IChangeRecord> resolved = + new LinkedList<IChangeRecord>(); + + // collect up the ISPOs out of the unresolved change records + final ISPO[] spos = new ISPO[unresolved.size()]; + int i = 0; + for (IChangeRecord rec : unresolved) { + spos[i++] = rec.getStatement(); + } + + // use the database to resolve them into BigdataStatements + final BigdataStatementIterator it = + db.asStatementIterator( + new ChunkedArrayIterator<ISPO>(i, spos, null/* keyOrder */)); + + /* + * the BigdataStatementIterator will produce BigdataStatement objects + * in the same order as the original ISPO array + */ + for (IChangeRecord rec : unresolved) { + + final BigdataStatement stmt = it.next(); + + resolved.add(new ChangeRecord(stmt, rec.getChangeAction())); + + } + + return resolved; + + } + + + +} Deleted: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -1,208 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Iterator; -import java.util.Map; -import org.apache.log4j.Logger; -import com.bigdata.rdf.changesets.IChangeRecord.ChangeAction; -import com.bigdata.rdf.internal.IV; -import com.bigdata.rdf.model.BigdataBNode; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.spo.SPO; -import com.bigdata.rdf.spo.ISPO.ModifiedEnum; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.relation.accesspath.IElementFilter; -import com.bigdata.striterator.ChunkedArrayIterator; -import 
com.bigdata.striterator.IChunkedOrderedIterator; - -public class StatementWriter { - - protected static final Logger log = Logger.getLogger(StatementWriter.class); - - public static long addStatements(final AbstractTripleStore database, - final AbstractTripleStore statementStore, - final boolean copyOnly, - final IElementFilter<ISPO> filter, - final IChunkedOrderedIterator<ISPO> itr, - final IChangeLog changeLog) { - - long n = 0; - - if (itr.hasNext()) { - -// final BigdataStatementIteratorImpl itr2 = -// new BigdataStatementIteratorImpl(database, bnodes, itr) -// .start(database.getExecutorService()); -// -// final BigdataStatement[] stmts = -// new BigdataStatement[database.getChunkCapacity()]; - final SPO[] stmts = new SPO[database.getChunkCapacity()]; - - int i = 0; - while ((i = nextChunk(itr, stmts)) > 0) { - n += addStatements(database, statementStore, copyOnly, filter, - stmts, i, changeLog); - } - - } - - return n; - - } - - private static long addStatements(final AbstractTripleStore database, - final AbstractTripleStore statementStore, - final boolean copyOnly, - final IElementFilter<ISPO> filter, - final ISPO[] stmts, - final int numStmts, - final IChangeLog changeLog) { - -// final SPO[] tmp = allocateSPOs(stmts, numStmts); - - final long n = database.addStatements(statementStore, copyOnly, - new ChunkedArrayIterator<ISPO>(numStmts, stmts, - null/* keyOrder */), filter); - - // Copy the state of the isModified() flag and notify changeLog - for (int i = 0; i < numStmts; i++) { - - if (stmts[i].isModified()) { - -// stmts[i].setModified(true); - - if (changeLog != null) { - - switch(stmts[i].getModified()) { - case INSERTED: - changeLog.changeEvent(new ChangeRecord(stmts[i], ChangeAction.INSERTED)); - break; - case UPDATED: - changeLog.changeEvent(new ChangeRecord(stmts[i], ChangeAction.UPDATED)); - break; - case REMOVED: - throw new AssertionError(); - default: - break; - } - - } - - } - - } - - return n; - - } - - public static long removeStatements(final AbstractTripleStore database, - final IChunkedOrderedIterator<ISPO> itr, - final boolean computeClosureForStatementIdentifiers, - final IChangeLog changeLog) { - - long n = 0; - - if (itr.hasNext()) { - -// final BigdataStatementIteratorImpl itr2 = -// new BigdataStatementIteratorImpl(database, bnodes, itr) -// .start(database.getExecutorService()); -// -// final BigdataStatement[] stmts = -// new BigdataStatement[database.getChunkCapacity()]; - final SPO[] stmts = new SPO[database.getChunkCapacity()]; - - int i = 0; - while ((i = nextChunk(itr, stmts)) > 0) { - n += removeStatements(database, stmts, i, - computeClosureForStatementIdentifiers, changeLog); - } - - } - - return n; - - } - - private static long removeStatements(final AbstractTripleStore database, - final ISPO[] stmts, - final int numStmts, - final boolean computeClosureForStatementIdentifiers, - final IChangeLog changeLog) { - - final long n = database.removeStatements( - new ChunkedArrayIterator<ISPO>(numStmts, stmts, - null/* keyOrder */), - computeClosureForStatementIdentifiers); - - // Copy the state of the isModified() flag and notify changeLog - for (int i = 0; i < numStmts; i++) { - - if (stmts[i].isModified()) { - - // just to be safe - stmts[i].setModified(ModifiedEnum.REMOVED); - - changeLog.changeEvent( - new ChangeRecord(stmts[i], ChangeAction.REMOVED)); - - } - - } - - return n; - - } - - private static int nextChunk(final Iterator<ISPO> itr, - final ISPO[] stmts) { - - assert stmts != null && stmts.length > 0; - - int i = 0; - while (itr.hasNext()) 
{ - stmts[i++] = itr.next(); - if (i == stmts.length) { - // stmts[] is full - return i; - } - } - - /* - * stmts[] is empty (i = 0) or partially - * full (i > 0 && i < stmts.length) - */ - return i; - - } - -// private static SPO[] allocateSPOs(final BigdataStatement[] stmts, -// final int numStmts) { -// -// final SPO[] tmp = new SPO[numStmts]; -// -// for (int i = 0; i < tmp.length; i++) { -// -// final BigdataStatement stmt = stmts[i]; -// -// final SPO spo = new SPO(stmt); -// -// if (log.isDebugEnabled()) -// log.debug("writing: " + stmt.toString() + " (" + spo + ")"); -// -// if(!spo.isFullyBound()) { -// -// throw new AssertionError("Not fully bound? : " + spo); -// -// } -// -// tmp[i] = spo; -// -// } -// -// return tmp; -// -// -// } - -} Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java (from rev 3978, branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -0,0 +1,208 @@ +package com.bigdata.rdf.changesets; + +import java.util.Iterator; +import java.util.Map; +import org.apache.log4j.Logger; +import com.bigdata.rdf.changesets.IChangeRecord.ChangeAction; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPO; +import com.bigdata.rdf.spo.ISPO.ModifiedEnum; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.relation.accesspath.IElementFilter; +import com.bigdata.striterator.ChunkedArrayIterator; +import com.bigdata.striterator.IChunkedOrderedIterator; + +public class StatementWriter { + + protected static final Logger log = Logger.getLogger(StatementWriter.class); + + public static long addStatements(final AbstractTripleStore database, + final AbstractTripleStore statementStore, + final boolean copyOnly, + final IElementFilter<ISPO> filter, + final IChunkedOrderedIterator<ISPO> itr, + final IChangeLog changeLog) { + + long n = 0; + + if (itr.hasNext()) { + +// final BigdataStatementIteratorImpl itr2 = +// new BigdataStatementIteratorImpl(database, bnodes, itr) +// .start(database.getExecutorService()); +// +// final BigdataStatement[] stmts = +// new BigdataStatement[database.getChunkCapacity()]; + final SPO[] stmts = new SPO[database.getChunkCapacity()]; + + int i = 0; + while ((i = nextChunk(itr, stmts)) > 0) { + n += addStatements(database, statementStore, copyOnly, filter, + stmts, i, changeLog); + } + + } + + return n; + + } + + private static long addStatements(final AbstractTripleStore database, + final AbstractTripleStore statementStore, + final boolean copyOnly, + final IElementFilter<ISPO> filter, + final ISPO[] stmts, + final int numStmts, + final IChangeLog changeLog) { + +// final SPO[] tmp = allocateSPOs(stmts, numStmts); + + final long n = database.addStatements(statementStore, copyOnly, + new ChunkedArrayIterator<ISPO>(numStmts, stmts, + null/* keyOrder */), filter); + + // Copy the state of the isModified() flag and notify changeLog + for (int i = 0; i < numStmts; i++) { + + if (stmts[i].isModified()) { + +// stmts[i].setModified(true); + + if (changeLog != null) { + + switch(stmts[i].getModified()) { + case INSERTED: + changeLog.changeEvent(new ChangeRecord(stmts[i], ChangeAction.INSERTED)); + break; + case 
UPDATED: + changeLog.changeEvent(new ChangeRecord(stmts[i], ChangeAction.UPDATED)); + break; + case REMOVED: + throw new AssertionError(); + default: + break; + } + + } + + } + + } + + return n; + + } + + public static long removeStatements(final AbstractTripleStore database, + final IChunkedOrderedIterator<ISPO> itr, + final boolean computeClosureForStatementIdentifiers, + final IChangeLog changeLog) { + + long n = 0; + + if (itr.hasNext()) { + +// final BigdataStatementIteratorImpl itr2 = +// new BigdataStatementIteratorImpl(database, bnodes, itr) +// .start(database.getExecutorService()); +// +// final BigdataStatement[] stmts = +// new BigdataStatement[database.getChunkCapacity()]; + final SPO[] stmts = new SPO[database.getChunkCapacity()]; + + int i = 0; + while ((i = nextChunk(itr, stmts)) > 0) { + n += removeStatements(database, stmts, i, + computeClosureForStatementIdentifiers, changeLog); + } + + } + + return n; + + } + + private static long removeStatements(final AbstractTripleStore database, + final ISPO[] stmts, + final int numStmts, + final boolean computeClosureForStatementIdentifiers, + final IChangeLog changeLog) { + + final long n = database.removeStatements( + new ChunkedArrayIterator<ISPO>(numStmts, stmts, + null/* keyOrder */), + computeClosureForStatementIdentifiers); + + // Copy the state of the isModified() flag and notify changeLog + for (int i = 0; i < numStmts; i++) { + + if (stmts[i].isModified()) { + + // just to be safe + stmts[i].setModified(ModifiedEnum.REMOVED); + + changeLog.changeEvent( + new ChangeRecord(stmts[i], ChangeAction.REMOVED)); + + } + + } + + return n; + + } + + private static int nextChunk(final Iterator<ISPO> itr, + final ISPO[] stmts) { + + assert stmts != null && stmts.length > 0; + + int i = 0; + while (itr.hasNext()) { + stmts[i++] = itr.next(); + if (i == stmts.length) { + // stmts[] is full + return i; + } + } + + /* + * stmts[] is empty (i = 0) or partially + * full (i > 0 && i < stmts.length) + */ + return i; + + } + +// private static SPO[] allocateSPOs(final BigdataStatement[] stmts, +// final int numStmts) { +// +// final SPO[] tmp = new SPO[numStmts]; +// +// for (int i = 0; i < tmp.length; i++) { +// +// final BigdataStatement stmt = stmts[i]; +// +// final SPO spo = new SPO(stmt); +// +// if (log.isDebugEnabled()) +// log.debug("writing: " + stmt.toString() + " (" + spo + ")"); +// +// if(!spo.isFullyBound()) { +// +// throw new AssertionError("Not fully bound? : " + spo); +// +// } +// +// tmp[i] = spo; +// +// } +// +// return tmp; +// +// +// } + +} Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -29,11 +29,15 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicLong; +import com.bigdata.rdf.changesets.IChangeLog; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.ISPOAssertionBuffer; import com.bigdata.rdf.spo.JustificationWriter; @@ -101,8 +105,13 @@ * {@link Justification}s for entailments. 
*/ protected final boolean justify; - + /** + * Used for change set notification (optional). + */ + protected final IChangeLog changeLog; + + /** * Create a buffer. * * @param focusStore @@ -126,6 +135,38 @@ AbstractTripleStore db, IElementFilter<ISPO> filter, int capacity, boolean justified) { + this(focusStore, db, filter, capacity, justified, + null/* changeLog */); + + } + + /** + * Create a buffer. + * + * @param focusStore + * The focusStore on which the entailments computed by closure + * will be written (required). This is either the database or a + * temporary focusStore used during incremental TM. + * @param db + * The database in which the terms are defined (required). + * @param filter + * Option filter. When present statements matched by the filter + * are NOT retained by the {@link SPOAssertionBuffer} and will + * NOT be added to the <i>focusStore</i>. + * @param capacity + * The maximum {@link SPO}s that the buffer can hold before it + * is {@link #flush()}ed. + * @param justified + * true iff the Truth Maintenance strategy requires that we + * focusStore {@link Justification}s for entailments. + * @param changeLog + * optional change log for change notification + */ + public SPOAssertionBuffer(AbstractTripleStore focusStore, + AbstractTripleStore db, IElementFilter<ISPO> filter, int capacity, + boolean justified, final IChangeLog changeLog + ) { + super(db, filter, capacity); if (focusStore == null) @@ -142,6 +183,8 @@ justifications = justified ? new Justification[capacity] : null; + this.changeLog = changeLog; + } /** @@ -180,12 +223,26 @@ if (numJustifications == 0) { - // batch insert statements into the focusStore. - n = db.addStatements( + if (changeLog == null) { + + // batch insert statements into the focusStore. + n = db.addStatements( focusStore, true/* copyOnly */, new ChunkedArrayIterator<ISPO>(numStmts, stmts, null/*keyOrder*/), null/*filter*/); + + } else { + + n = com.bigdata.rdf.changesets.StatementWriter.addStatements( + db, + focusStore, + true/* copyOnly */, + null/* filter */, + new ChunkedArrayIterator<ISPO>(numStmts, stmts, null/*keyOrder*/), + changeLog); + + } } else { @@ -209,7 +266,8 @@ // task will write SPOs on the statement indices. tasks.add(new StatementWriter(getTermDatabase(), focusStore, false/* copyOnly */, new ChunkedArrayIterator<ISPO>( - numStmts, stmts, null/*keyOrder*/), nwritten)); + numStmts, stmts, null/*keyOrder*/), nwritten, + changeLog)); // task will write justifications on the justifications index. final AtomicLong nwrittenj = new AtomicLong(); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -27,6 +27,11 @@ package com.bigdata.rdf.inf; +import java.util.Map; +import com.bigdata.rdf.changesets.IChangeLog; +import com.bigdata.rdf.changesets.StatementWriter; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.store.AbstractTripleStore; @@ -49,6 +54,11 @@ private final AbstractTripleStore store; private final boolean computeClosureForStatementIdentifiers; + + /** + * Optional change log for change notification. 
+ */ + protected final IChangeLog changeLog; /** * @param store @@ -63,6 +73,27 @@ public SPORetractionBuffer(AbstractTripleStore store, int capacity, boolean computeClosureForStatementIdentifiers) { + this(store, capacity, computeClosureForStatementIdentifiers, + null/* changeLog */); + + } + + /** + * @param store + * The database from which the statement will be removed when the + * buffer is {@link #flush()}ed. + * @param capacity + * The capacity of the retraction buffer. + * @param computeClosureForStatementIdentifiers + * See + * {@link AbstractTripleStore#removeStatements(com.bigdata.rdf.spo.ISPOIterator, boolean)} + * @param changeLog + * optional change log for change notification + */ + public SPORetractionBuffer(AbstractTripleStore store, int capacity, + boolean computeClosureForStatementIdentifiers, + final IChangeLog changeLog) { + super(store, null/*filter*/, capacity); if (store == null) @@ -72,14 +103,31 @@ this.computeClosureForStatementIdentifiers = computeClosureForStatementIdentifiers; + this.changeLog = changeLog; + } public int flush() { if (isEmpty()) return 0; - long n = store.removeStatements(new ChunkedArrayIterator<ISPO>(numStmts,stmts, + final long n; + + if (changeLog == null) { + + n = store.removeStatements(new ChunkedArrayIterator<ISPO>(numStmts,stmts, null/*keyOrder*/), computeClosureForStatementIdentifiers); + + } else { + + n = StatementWriter.removeStatements( + store, + new ChunkedArrayIterator<ISPO>( + numStmts,stmts,null/*keyOrder*/), + computeClosureForStatementIdentifiers, + changeLog); + ... [truncated message content] |
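
For reference, here is a minimal sketch of a consumer of the new IChangeLog interface introduced by this merge. It relies only on the types shown in the diff above (IChangeLog, IChangeRecord, and the nested IChangeRecord.ChangeAction enum); the example class itself is illustrative and does not show how the listener is attached to the SAIL connection, since that part of the diff is truncated here.

import java.util.EnumMap;
import java.util.Map;

import com.bigdata.rdf.changesets.IChangeLog;
import com.bigdata.rdf.changesets.IChangeRecord;
import com.bigdata.rdf.changesets.IChangeRecord.ChangeAction;

/**
 * Example IChangeLog listener (not part of this revision) that tallies
 * change events by ChangeAction (INSERTED, REMOVED, UPDATED) and resets
 * the tally on commit or abort.
 */
public class CountingChangeLog implements IChangeLog {

    // running tally of change events since the last commit/abort notification
    private final Map<ChangeAction, Long> counts =
            new EnumMap<ChangeAction, Long>(ChangeAction.class);

    public synchronized void changeEvent(final IChangeRecord record) {
        final ChangeAction action = record.getChangeAction();
        final Long n = counts.get(action);
        counts.put(action, n == null ? 1L : n + 1L);
    }

    public synchronized void transactionCommited() {
        // method name spelling ("Commited") follows the IChangeLog interface
        System.out.println("committed: " + counts);
        counts.clear();
    }

    public synchronized void transactionAborted() {
        counts.clear();
    }

}

As the IChangeLog javadoc above notes, changeEvent() fires when an add or remove is flushed to the indices, not when it is committed, so a tally like this is only final once transactionCommited() has been received.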