From: <tho...@us...> - 2011-01-06 20:13:53
|
Revision: 4061 http://bigdata.svn.sourceforge.net/bigdata/?rev=4061&view=rev Author: thompsonbry Date: 2011-01-06 20:13:43 +0000 (Thu, 06 Jan 2011) Log Message: ----------- Merge trunk to JOURNAL_HA_BRANCH [r3895:HEAD]. This merge brings in the change set API for the SAIL. Unit test failures in the HA branch at this time include: - TestChangeSets throws UnsupportedOperationException when running with quads. - TestNamedGraphs#testSearchQuery() fails with TestBigdataSailWithQuads (but not in the trunk). - TestBigdataSailEvaluationStrategy#test_free_text_search() fails with TestBigdataSailWithQuads, TestBigdataSailWithSids, and TestBigdataSailWithoutSids. These failures do not exist in the trunk. - BigdataSparqlTest#dataset-01, 03, 05, 06, 07, 08, 11, 12b fail with TestBigdataSailWithQuads (these test failures exist in the trunk as well). The text search related test errors were likely introduced in the JOURNAL_HA_BRANCH with some recent extensions to the SAIL free text search API. MikeP will look into these errors. 
Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/JOURNAL_HA_BRANCH/bigdata/src/resources/logging/log4j.properties branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexRemover.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriteProc.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriter.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/StatementWriter.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java Added Paths: ----------- branches/JOURNAL_HA_BRANCH/bigdata-compatibility/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/ 
branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexMutation.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/JOURNAL_HA_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java Removed Paths: ------------- branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 
Property Changed: ---------------- branches/JOURNAL_HA_BRANCH/ branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/attr/ branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/disco/ branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/util/config/ branches/JOURNAL_HA_BRANCH/bigdata-perf/ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/JOURNAL_HA_BRANCH/dsi-utils/src/java/it/ branches/JOURNAL_HA_BRANCH/dsi-utils/src/test/it/unimi/ branches/JOURNAL_HA_BRANCH/osgi/ Property changes on: branches/JOURNAL_HA_BRANCH ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/bugfix-btm:2594-2779 /trunk:2763-2785,2918-2980,3392-3437,3656-3894 + /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/bugfix-btm:2594-2779 /trunk:2763-2785,2918-2980,3392-3437,3656-3894,3896-4059 Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java 2011-01-06 19:38:57 UTC (rev 4060) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -39,6 +39,7 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import java.io.OutputStream; +import java.util.Arrays; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -795,18 +796,34 @@ IResultHandler<ResultBitBuffer, ResultBitBuffer> { private final boolean[] results; + + /** + * I added this so I could encode information about tuple modification + * that takes more than 
one boolean to encode. For example, SPOs can + * be: INSERTED, REMOVED, UPDATED, NO_OP (2 bits). + */ + private final int multiplier; + private final AtomicInteger onCount = new AtomicInteger(); public ResultBitBufferHandler(final int nkeys) { + + this(nkeys, 1); + + } + + public ResultBitBufferHandler(final int nkeys, final int multiplier) { - results = new boolean[nkeys]; + results = new boolean[nkeys*multiplier]; + this.multiplier = multiplier; } public void aggregate(final ResultBitBuffer result, final Split split) { - System.arraycopy(result.getResult(), 0, results, split.fromIndex, - split.ntuples); + System.arraycopy(result.getResult(), 0, results, + split.fromIndex*multiplier, + split.ntuples*multiplier); onCount.addAndGet(result.getOnCount()); Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2011-01-06 19:38:57 UTC (rev 4060) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -88,22 +88,10 @@ import com.bigdata.relation.locator.IResourceLocator; import com.bigdata.resources.ResourceManager; import com.bigdata.rwstore.IAllocationContext; -import com.bigdata.service.DataService; -import com.bigdata.service.EmbeddedClient; -import com.bigdata.service.IBigdataClient; -import com.bigdata.service.IBigdataFederation; -import com.bigdata.service.jini.JiniClient; import com.bigdata.util.ChecksumUtility; /** * <p> -<<<<<<< .working - * The journal is an append-only persistence capable data structure supporting - * atomic commit, named indices, and transactions. Writes are logically appended - * to the journal to minimize disk head movement. - * </p> - * <p> -======= * The journal is a persistence capable data structure supporting atomic commit, * named indices, and full transactions. 
The {@link BufferMode#DiskRW} mode * provides an persistence scheme based on reusable allocation slots while the @@ -111,50 +99,13 @@ * Journals may be configured in highly available quorums. * </p> * <p> ->>>>>>> .merge-right.r3391 * This class is an abstract implementation of the {@link IJournal} interface * that does not implement the {@link IConcurrencyManager}, -<<<<<<< .working - * {@link IResourceManager}, or {@link ITransactionService} interfaces. There - * are several classes which DO support all of these features, relying on the - * {@link AbstractJournal} for their underlying persistence store. These - * include: - * <dl> - * <dt>{@link Journal}</dt> - * <dd>A concrete implementation that may be used for a standalone immortal - * database complete with concurrency control and transaction management.</dd> - * <dt>{@link DataService}</dt> - * <dd>A class supporting remote clients, key-range partitioned indices, - * concurrency, and scale-out.</dd> - * <dt>{@link IBigdataClient}</dt> - * <dd>Clients connect to an {@link IBigdataFederation}, which is the basis for - * the scale-out architecture. There are several variants of a federation - * available, including: - * <dl> - * <dt>{@link LocalDataServiceClient}</dt> - * <dd>Purely local operations against a {@link DataService} with full - * concurrency controls and transaction management</dd> - * <dt>{@link EmbeddedClient}</dt> - * <dd>Operations against a collection of services running in the same JVM with - * full concurrency controls, transaction management, and key-range partitioned - * indices.</dd> - * <dt>{@link JiniClient}</dt> - * <dd>Operations against a collection of services running on a distributed - * services framework such as Jini with full concurrency controls, transaction - * management, and key-range partitioned indices. 
This is the scale-out - * solution.</dd> - * </dl> - * </dd> - * </dl> - * </p> - * <h2>Limitations</h2> -======= * {@link IResourceManager}, or {@link ITransactionService} interfaces. The * {@link Journal} provides a concrete implementation that may be used for a * standalone database complete with concurrency control and transaction * management. * </p> <h2>Limitations</h2> ->>>>>>> .merge-right.r3391 * <p> * The {@link IIndexStore} implementation on this class is NOT thread-safe. The * basic limitation is that the mutable {@link BTree} is NOT thread-safe. The Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/resources/logging/log4j.properties =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/resources/logging/log4j.properties 2011-01-06 19:38:57 UTC (rev 4060) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/resources/logging/log4j.properties 2011-01-06 20:13:43 UTC (rev 4061) @@ -56,6 +56,7 @@ #log4j.logger.com.bigdata.io.WriteCacheService=TRACE #log4j.logger.com.bigdata.journal.AbstractBufferStrategy=TRACE #log4j.logger.com.bigdata.resources=INFO +#log4j.logger.com.bigdata.rwstore.RWStore=TRACE log4j.logger.com.bigdata.journal.ha.HAServer=ALL log4j.logger.com.bigdata.journal.ha.HAConnect=ALL log4j.logger.com.bigdata.journal.ha.SocketMessage=ALL @@ -64,7 +65,7 @@ #log4j.logger.com.bigdata.journal.Name2Addr=INFO #log4j.logger.com.bigdata.journal.AbstractTask=INFO #log4j.logger.com.bigdata.journal.WriteExecutorService=INFO -#log4j.logger.com.bigdata.service.AbstractTransactionService=INFO +#log4j.logger.com.bigdata.service.AbstractTransactionService=TRACE #log4j.logger.com.bigdata.journal.AbstractLocalTransactionManager=INFO log4j.logger.com.bigdata.concurrent.TxDag=WARN log4j.logger.com.bigdata.concurrent.NonBlockingLockManager=WARN Deleted: branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 
=================================================================== --- trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2011-01-05 22:42:01 UTC (rev 4059) +++ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -1,276 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ -/* - * Created on Nov 19, 2010 - */ -package com.bigdata.journal; - -import java.io.File; -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -import junit.framework.TestCase2; - -import com.bigdata.Banner; -import com.bigdata.btree.IIndex; -import com.bigdata.btree.IndexMetadata; - -/** - * Test suite for binary compatibility, portability, and forward compatibility - * or automated migration of persistent stores and persistence or serialization - * capable objects across different bigdata releases. The tests in this suite - * rely on artifacts which are archived within SVN. - * - * @todo create w/ small extent and truncate (RW store does not support - * truncate). - * - * @todo test binary migration and forward compatibility. - * - * @todo stubs to create and organize artifacts,etc. 
- * - * @todo data driven test suite? - * - * @todo create artifact for each release, name the artifacts systematically, - * e.g., test.release.(RW|WORM).jnl or test.release.seg. Collect a list of - * the created artifacts and run each test against each of the versions of - * the artifact. - * - * @todo Force artifact file name case for file system compatibility? - * - * @todo test journal (WORM and RW), btree, index segment, row store, persistent - * data structures (checkpoints, index metadata, tuple serializers, etc.), - * RDF layer, RMI message formats, etc. - * - * @todo Specific tests for - * <p> - * Name2Addr and DefaultKeyBuilderFactory portability problem. See - * https://sourceforge.net/apps/trac/bigdata/ticket/193 - * <p> - * WORM global row store resolution problem introduced in the - * JOURNAL_HA_BRANCH. See - * https://sourceforge.net/apps/trac/bigdata/ticket/171#comment:5 - * <p> - * Sparse row store JDK encoding problem: - * https://sourceforge.net/apps/trac/bigdata/ticket/107 - */ -public class TestBinaryCompatibility extends TestCase2 { - - /** - * - */ - public TestBinaryCompatibility() { - } - - /** - * @param name - */ - public TestBinaryCompatibility(String name) { - super(name); - } - - /** - * @todo munge the release version into a name that is compatibility with - * the file system ("." to "_"). Store artifacts at each release? At - * each release in which an incompatibility is introduced? At each - * release in which a persistence capable data structure or change is - * introduced? - */ - static protected final File artifactDir = new File( - "bigdata-compatibility/src/resources/artifacts"); - - protected static class Version { - private final String version; - private final String revision; - public Version(String version,String revision) { - this.version = version; - this.revision = revision; - } - - /** - * The bigdata version number associated with the release. 
This is in - * the form <code>xx.yy.zz</code> - */ - public String getVersion() { - return version; - } - - /** - * The SVN repository revision associated with the release. This is in - * the form <code>####</code>. - */ - public String getRevision() { - return revision; - } - } - - /** - * Known release versions. - */ - protected static Version V_0_83_2 = new Version("0.83.2", "3349"); - - /** - * Tested Versions. - */ - protected Version[] versions = new Version[] { - V_0_83_2 - }; - - protected void setUp() throws Exception { - - Banner.banner(); - - super.setUp(); - - if (!artifactDir.exists()) { - - if (!artifactDir.mkdirs()) { - - throw new IOException("Could not create: " + artifactDir); - - } - - } - - for (Version version : versions) { - - final File versionDir = new File(artifactDir, version.getVersion()); - - if (!versionDir.exists()) { - - if (!versionDir.mkdirs()) { - - throw new IOException("Could not create: " + versionDir); - - } - - } - - } - - } - - protected void tearDown() throws Exception { - - super.tearDown(); - - } - - /** - * @throws Throwable - * - * @todo Each 'test' should run an instance of a class which knows how to - * create the appropriate artifacts and how to test them. 
- */ - public void test_WORM_compatibility_with_JOURNAL_HA_BRANCH() - throws Throwable { - - final Version version = V_0_83_2; - - final File versionDir = new File(artifactDir, version.getVersion()); - - final File artifactFile = new File(versionDir, getName() - + BufferMode.DiskWORM + Journal.Options.JNL); - - if (!artifactFile.exists()) { - - createArtifact(artifactFile); - - } - - verifyArtifact(artifactFile); - - } - - protected void createArtifact(final File artifactFile) throws Throwable { - - if (log.isInfoEnabled()) - log.info("Creating: " + artifactFile); - - final Properties properties = new Properties(); - - properties.setProperty(Journal.Options.FILE, artifactFile.toString()); - - properties.setProperty(Journal.Options.INITIAL_EXTENT, "" - + Journal.Options.minimumInitialExtent); - - final Journal journal = new Journal(properties); - - try { - - final IndexMetadata md = new IndexMetadata(UUID.randomUUID()); - - final IIndex ndx = journal.registerIndex("kb.spo.SPO", md); - - ndx.insert(1,1); - - journal.commit(); - - // reduce to minimum footprint. 
- journal.truncate(); - - } catch (Throwable t) { - - journal.destroy(); - - throw new RuntimeException(t); - - } finally { - - if (journal.isOpen()) - journal.close(); - - } - - } - - protected void verifyArtifact(final File artifactFile) throws Throwable { - - if (log.isInfoEnabled()) - log.info("Verifying: " + artifactFile); - - final Properties properties = new Properties(); - - properties.setProperty(Journal.Options.FILE, artifactFile.toString()); - - final Journal journal = new Journal(properties); - - try { - - final IIndex ndx = journal.getIndex("kb.spo.SPO"); - - assertNotNull(ndx); - - assertEquals(1,ndx.lookup(1)); - - } finally { - - journal.close(); - - } - - } - -} Copied: branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java (from rev 4059, trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -0,0 +1,276 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Nov 19, 2010 + */ +package com.bigdata.journal; + +import java.io.File; +import java.io.IOException; +import java.util.Properties; +import java.util.UUID; + +import junit.framework.TestCase2; + +import com.bigdata.Banner; +import com.bigdata.btree.IIndex; +import com.bigdata.btree.IndexMetadata; + +/** + * Test suite for binary compatibility, portability, and forward compatibility + * or automated migration of persistent stores and persistence or serialization + * capable objects across different bigdata releases. The tests in this suite + * rely on artifacts which are archived within SVN. + * + * @todo create w/ small extent and truncate (RW store does not support + * truncate). + * + * @todo test binary migration and forward compatibility. + * + * @todo stubs to create and organize artifacts,etc. + * + * @todo data driven test suite? + * + * @todo create artifact for each release, name the artifacts systematically, + * e.g., test.release.(RW|WORM).jnl or test.release.seg. Collect a list of + * the created artifacts and run each test against each of the versions of + * the artifact. + * + * @todo Force artifact file name case for file system compatibility? + * + * @todo test journal (WORM and RW), btree, index segment, row store, persistent + * data structures (checkpoints, index metadata, tuple serializers, etc.), + * RDF layer, RMI message formats, etc. + * + * @todo Specific tests for + * <p> + * Name2Addr and DefaultKeyBuilderFactory portability problem. See + * https://sourceforge.net/apps/trac/bigdata/ticket/193 + * <p> + * WORM global row store resolution problem introduced in the + * JOURNAL_HA_BRANCH. 
See + * https://sourceforge.net/apps/trac/bigdata/ticket/171#comment:5 + * <p> + * Sparse row store JDK encoding problem: + * https://sourceforge.net/apps/trac/bigdata/ticket/107 + */ +public class TestBinaryCompatibility extends TestCase2 { + + /** + * + */ + public TestBinaryCompatibility() { + } + + /** + * @param name + */ + public TestBinaryCompatibility(String name) { + super(name); + } + + /** + * @todo munge the release version into a name that is compatibility with + * the file system ("." to "_"). Store artifacts at each release? At + * each release in which an incompatibility is introduced? At each + * release in which a persistence capable data structure or change is + * introduced? + */ + static protected final File artifactDir = new File( + "bigdata-compatibility/src/resources/artifacts"); + + protected static class Version { + private final String version; + private final String revision; + public Version(String version,String revision) { + this.version = version; + this.revision = revision; + } + + /** + * The bigdata version number associated with the release. This is in + * the form <code>xx.yy.zz</code> + */ + public String getVersion() { + return version; + } + + /** + * The SVN repository revision associated with the release. This is in + * the form <code>####</code>. + */ + public String getRevision() { + return revision; + } + } + + /** + * Known release versions. + */ + protected static Version V_0_83_2 = new Version("0.83.2", "3349"); + + /** + * Tested Versions. 
+ */ + protected Version[] versions = new Version[] { + V_0_83_2 + }; + + protected void setUp() throws Exception { + + Banner.banner(); + + super.setUp(); + + if (!artifactDir.exists()) { + + if (!artifactDir.mkdirs()) { + + throw new IOException("Could not create: " + artifactDir); + + } + + } + + for (Version version : versions) { + + final File versionDir = new File(artifactDir, version.getVersion()); + + if (!versionDir.exists()) { + + if (!versionDir.mkdirs()) { + + throw new IOException("Could not create: " + versionDir); + + } + + } + + } + + } + + protected void tearDown() throws Exception { + + super.tearDown(); + + } + + /** + * @throws Throwable + * + * @todo Each 'test' should run an instance of a class which knows how to + * create the appropriate artifacts and how to test them. + */ + public void test_WORM_compatibility_with_JOURNAL_HA_BRANCH() + throws Throwable { + + final Version version = V_0_83_2; + + final File versionDir = new File(artifactDir, version.getVersion()); + + final File artifactFile = new File(versionDir, getName() + + BufferMode.DiskWORM + Journal.Options.JNL); + + if (!artifactFile.exists()) { + + createArtifact(artifactFile); + + } + + verifyArtifact(artifactFile); + + } + + protected void createArtifact(final File artifactFile) throws Throwable { + + if (log.isInfoEnabled()) + log.info("Creating: " + artifactFile); + + final Properties properties = new Properties(); + + properties.setProperty(Journal.Options.FILE, artifactFile.toString()); + + properties.setProperty(Journal.Options.INITIAL_EXTENT, "" + + Journal.Options.minimumInitialExtent); + + final Journal journal = new Journal(properties); + + try { + + final IndexMetadata md = new IndexMetadata(UUID.randomUUID()); + + final IIndex ndx = journal.registerIndex("kb.spo.SPO", md); + + ndx.insert(1,1); + + journal.commit(); + + // reduce to minimum footprint. 
+ journal.truncate(); + + } catch (Throwable t) { + + journal.destroy(); + + throw new RuntimeException(t); + + } finally { + + if (journal.isOpen()) + journal.close(); + + } + + } + + protected void verifyArtifact(final File artifactFile) throws Throwable { + + if (log.isInfoEnabled()) + log.info("Verifying: " + artifactFile); + + final Properties properties = new Properties(); + + properties.setProperty(Journal.Options.FILE, artifactFile.toString()); + + final Journal journal = new Journal(properties); + + try { + + final IIndex ndx = journal.getIndex("kb.spo.SPO"); + + assertNotNull(ndx); + + assertEquals(1,ndx.lookup(1)); + + } finally { + + journal.close(); + + } + + } + +} Property changes on: branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/attr ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/attr:2981-3437,3656-3894 + /trunk/bigdata-jini/src/java/com/bigdata/attr:2981-3437,3656-3894,3896-4059 Property changes on: branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/disco ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/disco:2981-3437,3656-3894 + /trunk/bigdata-jini/src/java/com/bigdata/disco:2981-3437,3656-3894,3896-4059 Property changes on: branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/util/config ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/util/config:2981-3437,3656-3894 + /trunk/bigdata-jini/src/java/com/bigdata/util/config:2981-3437,3656-3894,3896-4059 Property changes on: branches/JOURNAL_HA_BRANCH/bigdata-perf ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-perf:2981-3437,3656-3894 + /trunk/bigdata-perf:2981-3437,3656-3894,3896-4059 Deleted: 
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2011-01-05 22:42:01 UTC (rev 4059) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -1,98 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Comparator; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.spo.SPOComparator; - -public class ChangeRecord implements IChangeRecord { - - private final ISPO stmt; - - private final ChangeAction action; - -// private final StatementEnum oldType; - - public ChangeRecord(final ISPO stmt, final ChangeAction action) { - -// this(stmt, action, null); -// -// } -// -// public ChangeRecord(final BigdataStatement stmt, final ChangeAction action, -// final StatementEnum oldType) { -// - this.stmt = stmt; - this.action = action; -// this.oldType = oldType; - - } - - public ChangeAction getChangeAction() { - - return action; - - } - -// public StatementEnum getOldStatementType() { -// -// return oldType; -// -// } - - public ISPO getStatement() { - - return stmt; - - } - - @Override - public boolean equals(Object o) { - - if (o == this) - return true; - - if (o == null || o instanceof IChangeRecord == false) - return false; - - final IChangeRecord rec = (IChangeRecord) o; - - final ISPO stmt2 = rec.getStatement(); - - // statements are equal - if (stmt == stmt2 || - (stmt != null && stmt2 != null && stmt.equals(stmt2))) { - - // actions are equal - return action == rec.getChangeAction(); - - } - - return false; - - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - - sb.append(action).append(": ").append(stmt); - - return sb.toString(); - - } - - public static final Comparator<IChangeRecord> COMPARATOR = - new Comparator<IChangeRecord>() { - - public int compare(final 
IChangeRecord r1, final IChangeRecord r2) { - - final ISPO spo1 = r1.getStatement(); - final ISPO spo2 = r2.getStatement(); - - return SPOComparator.INSTANCE.compare(spo1, spo2); - - } - - }; - -} Copied: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java (from rev 4059, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -0,0 +1,98 @@ +package com.bigdata.rdf.changesets; + +import java.util.Comparator; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPOComparator; + +public class ChangeRecord implements IChangeRecord { + + private final ISPO stmt; + + private final ChangeAction action; + +// private final StatementEnum oldType; + + public ChangeRecord(final ISPO stmt, final ChangeAction action) { + +// this(stmt, action, null); +// +// } +// +// public ChangeRecord(final BigdataStatement stmt, final ChangeAction action, +// final StatementEnum oldType) { +// + this.stmt = stmt; + this.action = action; +// this.oldType = oldType; + + } + + public ChangeAction getChangeAction() { + + return action; + + } + +// public StatementEnum getOldStatementType() { +// +// return oldType; +// +// } + + public ISPO getStatement() { + + return stmt; + + } + + @Override + public boolean equals(Object o) { + + if (o == this) + return true; + + if (o == null || o instanceof IChangeRecord == false) + return false; + + final IChangeRecord rec = (IChangeRecord) o; + + final ISPO stmt2 = rec.getStatement(); + + // statements are equal + if (stmt == stmt2 || + (stmt != null && stmt2 != null && stmt.equals(stmt2))) { + + // actions are equal + return action == rec.getChangeAction(); + + } + + return false; + + 
} + + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append(action).append(": ").append(stmt); + + return sb.toString(); + + } + + public static final Comparator<IChangeRecord> COMPARATOR = + new Comparator<IChangeRecord>() { + + public int compare(final IChangeRecord r1, final IChangeRecord r2) { + + final ISPO spo1 = r1.getStatement(); + final ISPO spo2 = r2.getStatement(); + + return SPOComparator.INSTANCE.compare(spo1, spo2); + + } + + }; + +} Deleted: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2011-01-05 22:42:01 UTC (rev 4059) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -1,38 +0,0 @@ -package com.bigdata.rdf.changesets; - -/** - * Provides detailed information on changes made to statements in the database. - * Change records are generated for any statements that are used in - * addStatement() or removeStatements() operations on the SAIL connection, as - * well as any inferred statements that are added or removed as a result of - * truth maintenance when the database has inference enabled. Change records - * will be sent to an instance of this class via the - * {@link #changeEvent(IChangeRecord)} method. These events will - * occur on an ongoing basis as statements are added to or removed from the - * indices. It is the change log's responsibility to collect change records. - * When the transaction is actually committed (or aborted), the change log will - * receive notification via {@link #transactionCommited()} or - * {@link #transactionAborted()}. - */ -public interface IChangeLog { - - /** - * Occurs when a statement add or remove is flushed to the indices (but - * not yet committed). 
- * - * @param record - * the {@link IChangeRecord} - */ - void changeEvent(final IChangeRecord record); - - /** - * Occurs when the current SAIL transaction is committed. - */ - void transactionCommited(); - - /** - * Occurs if the current SAIL transaction is aborted. - */ - void transactionAborted(); - -} Copied: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java (from rev 4059, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -0,0 +1,38 @@ +package com.bigdata.rdf.changesets; + +/** + * Provides detailed information on changes made to statements in the database. + * Change records are generated for any statements that are used in + * addStatement() or removeStatements() operations on the SAIL connection, as + * well as any inferred statements that are added or removed as a result of + * truth maintenance when the database has inference enabled. Change records + * will be sent to an instance of this class via the + * {@link #changeEvent(IChangeRecord)} method. These events will + * occur on an ongoing basis as statements are added to or removed from the + * indices. It is the change log's responsibility to collect change records. + * When the transaction is actually committed (or aborted), the change log will + * receive notification via {@link #transactionCommited()} or + * {@link #transactionAborted()}. + */ +public interface IChangeLog { + + /** + * Occurs when a statement add or remove is flushed to the indices (but + * not yet committed). 
+ * + * @param record + * the {@link IChangeRecord} + */ + void changeEvent(final IChangeRecord record); + + /** + * Occurs when the current SAIL transaction is committed. + */ + void transactionCommited(); + + /** + * Occurs if the current SAIL transaction is aborted. + */ + void transactionAborted(); + +} Deleted: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2011-01-05 22:42:01 UTC (rev 4059) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -1,120 +0,0 @@ -package com.bigdata.rdf.changesets; - -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.model.StatementEnum; -import com.bigdata.rdf.spo.ISPO; - -/** - * Provides detailed information on changes made to statements in the database. - * Change records are generated for any statements that are used in - * addStatement() or removeStatements() operations on the SAIL connection, as - * well as any inferred statements that are added or removed as a result of - * truth maintenance when the database has inference enabled. - * <p> - * See {@link IChangeLog}. - */ -public interface IChangeRecord { - - /** - * Attempting to add or remove statements can have a number of different - * effects. This enum captures the different actions that can take place as - * a result of trying to add or remove a statement from the database. - */ - public enum ChangeAction { - - /** - * The focus statement was not in the database before and will be - * in the database after the commit. This can be the result of either - * explicit addStatement() operations on the SAIL connection, or from - * new inferences being generated via truth maintenance when the - * database has inference enabled. 
If the focus statement has a - * statement type of explicit then it was added via an addStatement() - * operation. If the focus statement has a statement type of inferred - * then it was added via truth maintenance. - */ - INSERTED, - - /** - * The focus statement was in the database before and will not - * be in the database after the commit. When the database has inference - * and truth maintenance enabled, the statement that is the focus of - * this change record was either an explicit statement that was the - * subject of a removeStatements() operation on the connection, or it - * was an inferred statement that was removed as a result of truth - * maintenance. Either way, the statement is no longer provable as an - * inference using other statements still in the database after the - * commit. If it were still provable, the explicit statement would have - * had its type changed to inferred, and the inferred statement would - * have remained untouched by truth maintenance. If an inferred - * statement was the subject of a removeStatement() operation on the - * connection it would have resulted in a no-op, since inferences can - * only be removed via truth maintenance. - */ - REMOVED, - - /** - * This change action can only occur when inference and truth - * maintenance are enabled on the database. Sometimes an attempt at - * statement addition or removal via an addStatement() or - * removeStatements() operation on the connection will result in a type - * change rather than an actual assertion or deletion. When in - * inference mode, statements can have one of three statement types: - * explicit, inferred, or axiom (see {@link StatementEnum}). There are - * several reasons why a statement will change type rather than be - * asserted or deleted: - * <p> - * <ul> - * <li> A statement is asserted, but already exists in the database as - * an inference or an axiom. The existing statement will have its type - * changed from inference or axiom to explicit. 
</li> - * <li> An explicit statement is retracted, but is still provable by - * other means. It will have its type changed from explicit to - * inference. </li> - * <li> An explicit statement is retracted, but is one of the axioms - * needed for inference. It will have its type changed from explicit to - * axiom. </li> - * </ul> - */ - UPDATED, - -// /** -// * This change action can occur for one of two reasons: -// * <p> -// * <ul> -// * <li> A statement is asserted, but already exists in the database as -// * an explicit statement. </li> -// * <li> An inferred statement or an axiom is retracted. Only explicit -// * statements can be retracted via removeStatements() operations. </li> -// * </ul> -// */ -// NO_OP - - } - - /** - * Return the ISPO that is the focus of this change record. - * - * @return - * the {@link ISPO} - */ - ISPO getStatement(); - - /** - * Return the change action for this change record. - * - * @return - * the {@link ChangeAction} - */ - ChangeAction getChangeAction(); - -// /** -// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method -// * will return the old statement type of the focus statement. The -// * new statement type is available on the focus statement itself. 
-// * -// * @return -// * the old statement type of the focus statement -// */ -// StatementEnum getOldStatementType(); - -} Copied: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java (from rev 4059, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -0,0 +1,120 @@ +package com.bigdata.rdf.changesets; + +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.spo.ISPO; + +/** + * Provides detailed information on changes made to statements in the database. + * Change records are generated for any statements that are used in + * addStatement() or removeStatements() operations on the SAIL connection, as + * well as any inferred statements that are added or removed as a result of + * truth maintenance when the database has inference enabled. + * <p> + * See {@link IChangeLog}. + */ +public interface IChangeRecord { + + /** + * Attempting to add or remove statements can have a number of different + * effects. This enum captures the different actions that can take place as + * a result of trying to add or remove a statement from the database. + */ + public enum ChangeAction { + + /** + * The focus statement was not in the database before and will be + * in the database after the commit. This can be the result of either + * explicit addStatement() operations on the SAIL connection, or from + * new inferences being generated via truth maintenance when the + * database has inference enabled. If the focus statement has a + * statement type of explicit then it was added via an addStatement() + * operation. 
If the focus statement has a statement type of inferred + * then it was added via truth maintenance. + */ + INSERTED, + + /** + * The focus statement was in the database before and will not + * be in the database after the commit. When the database has inference + * and truth maintenance enabled, the statement that is the focus of + * this change record was either an explicit statement that was the + * subject of a removeStatements() operation on the connection, or it + * was an inferred statement that was removed as a result of truth + * maintenance. Either way, the statement is no longer provable as an + * inference using other statements still in the database after the + * commit. If it were still provable, the explicit statement would have + * had its type changed to inferred, and the inferred statement would + * have remained untouched by truth maintenance. If an inferred + * statement was the subject of a removeStatement() operation on the + * connection it would have resulted in a no-op, since inferences can + * only be removed via truth maintenance. + */ + REMOVED, + + /** + * This change action can only occur when inference and truth + * maintenance are enabled on the database. Sometimes an attempt at + * statement addition or removal via an addStatement() or + * removeStatements() operation on the connection will result in a type + * change rather than an actual assertion or deletion. When in + * inference mode, statements can have one of three statement types: + * explicit, inferred, or axiom (see {@link StatementEnum}). There are + * several reasons why a statement will change type rather than be + * asserted or deleted: + * <p> + * <ul> + * <li> A statement is asserted, but already exists in the database as + * an inference or an axiom. The existing statement will have its type + * changed from inference or axiom to explicit. </li> + * <li> An explicit statement is retracted, but is still provable by + * other means. 
It will have its type changed from explicit to + * inference. </li> + * <li> An explicit statement is retracted, but is one of the axioms + * needed for inference. It will have its type changed from explicit to + * axiom. </li> + * </ul> + */ + UPDATED, + +// /** +// * This change action can occur for one of two reasons: +// * <p> +// * <ul> +// * <li> A statement is asserted, but already exists in the database as +// * an explicit statement. </li> +// * <li> An inferred statement or an axiom is retracted. Only explicit +// * statements can be retracted via removeStatements() operations. </li> +// * </ul> +// */ +// NO_OP + + } + + /** + * Return the ISPO that is the focus of this change record. + * + * @return + * the {@link ISPO} + */ + ISPO getStatement(); + + /** + * Return the change action for this change record. + * + * @return + * the {@link ChangeAction} + */ + ChangeAction getChangeAction(); + +// /** +// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method +// * will return the old statement type of the focus statement. The +// * new statement type is available on the focus statement itself. 
+// * +// * @return +// * the old statement type of the focus statement +// */ +// StatementEnum getOldStatementType(); + +} Deleted: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2011-01-05 22:42:01 UTC (rev 4059) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -1,163 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.Map; -import org.apache.log4j.Logger; -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.rdf.store.BigdataStatementIterator; -import com.bigdata.striterator.ChunkedArrayIterator; - -/** - * This is a very simple implementation of a change log. NOTE: This is not - * a particularly great implementation. First of all it ends up storing - * two copies of the change set. Secondly it needs to be smarter about - * concurrency, or maybe we can be smart about it when we do the - * implementation on the other side (the SAIL connection can just write - * change events to a buffer and then the buffer can be drained by - * another thread that doesn't block the actual read/write operations, - * although then we need to be careful not to issue the committed() - * notification before the buffer is drained). - * - * @author mike - * - */ -public class InMemChangeLog implements IChangeLog { - - protected static final Logger log = Logger.getLogger(InMemChangeLog.class); - - /** - * Running tally of new changes since the last commit notification. 
- */ - private final Map<ISPO,IChangeRecord> changeSet = - new HashMap<ISPO, IChangeRecord>(); - - /** - * Keep a record of the change set as of the last commit. - */ - private final Map<ISPO,IChangeRecord> committed = - new HashMap<ISPO, IChangeRecord>(); - - /** - * See {@link IChangeLog#changeEvent(IChangeRecord)}. - */ - public synchronized void changeEvent(final IChangeRecord record) { - - if (log.isInfoEnabled()) - log.info(record); - - changeSet.put(record.getStatement(), record); - - } - - /** - * See {@link IChangeLog#transactionCommited()}. - */ - public synchronized void transactionCommited() { - - if (log.isInfoEnabled()) - log.info("transaction committed"); - - committed.clear(); - - committed.putAll(changeSet); - - changeSet.clear(); - - } - - /** - * See {@link IChangeLog#transactionAborted()}. - */ - public synchronized void transactionAborted() { - - if (log.isInfoEnabled()) - log.info("transaction aborted"); - - changeSet.clear(); - - } - - /** - * Return the change set as of the last commmit point. - * - * @return - * a collection of {@link IChangeRecord}s as of the last commit - * point - */ - public Collection<IChangeRecord> getLastCommit() { - - return committed.values(); - - } - - /** - * Return the change set as of the last commmit point, using the supplied - * database to resolve ISPOs to BigdataStatements. - * - * @return - * a collection of {@link IChangeRecord}s as of the last commit - * point - */ - public Collection<IChangeRecord> getLastCommit(final AbstractTripleStore db) { - - return resolve(db, committed.values()); - - } - - /** - * Use the supplied database to turn a set of ISPO change records into - * BigdataStatement change records. BigdataStatements also implement - * ISPO, the difference being that BigdataStatements also contain - * materialized RDF terms for the 3 (or 4) positions, in addition to just - * the internal identifiers (IVs) for those terms. 
- * - * @param db - * the database containing the lexicon needed to materialize - * the BigdataStatement objects - * @param unresolved - * the ISPO change records that came from IChangeLog notification - * events - * @return - * the fully resolves BigdataStatement change records - */ - private Collection<IChangeRecord> resolve(final AbstractTripleStore db, - final Collection<IChangeRecord> unresolved) { - - final Collection<IChangeRecord> resolved = - new LinkedList<IChangeRecord>(); - - // collect up the ISPOs out of the unresolved change records - final ISPO[] spos = new ISPO[unresolved.size()]; - int i = 0; - for (IChangeRecord rec : unresolved) { - spos[i++] = rec.getStatement(); - } - - // use the database to resolve them into BigdataStatements - final BigdataStatementIterator it = - db.asStatementIterator( - new ChunkedArrayIterator<ISPO>(i, spos, null/* keyOrder */)); - - /* - * the BigdataStatementIterator will produce BigdataStatement objects - * in the same order as the original ISPO array - */ - for (IChangeRecord rec : unresolved) { - - final BigdataStatement stmt = it.next(); - - resolved.add(new ChangeRecord(stmt, rec.getChangeAction())); - - } - - return resolved; - - } - - - -} Copied: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java (from rev 4059, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -0,0 +1,163 @@ +package com.bigdata.rdf.changesets; + +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; +import org.apache.log4j.Logger; +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.spo.ISPO; 
+import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BigdataStatementIterator; +import com.bigdata.striterator.ChunkedArrayIterator; + +/** + * This is a very simple implementation of a change log. NOTE: This is not + * a particularly great implementation. First of all it ends up storing + * two copies of the change set. Secondly it needs to be smarter about + * concurrency, or maybe we can be smart about it when we do the + * implementation on the other side (the SAIL connection can just write + * change events to a buffer and then the buffer can be drained by + * another thread that doesn't block the actual read/write operations, + * although then we need to be careful not to issue the committed() + * notification before the buffer is drained). + * + * @author mike + * + */ +public class InMemChangeLog implements IChangeLog { + + protected static final Logger log = Logger.getLogger(InMemChangeLog.class); + + /** + * Running tally of new changes since the last commit notification. + */ + private final Map<ISPO,IChangeRecord> changeSet = + new HashMap<ISPO, IChangeRecord>(); + + /** + * Keep a record of the change set as of the last commit. + */ + private final Map<ISPO,IChangeRecord> committed = + new HashMap<ISPO, IChangeRecord>(); + + /** + * See {@link IChangeLog#changeEvent(IChangeRecord)}. + */ + public synchronized void changeEvent(final IChangeRecord record) { + + if (log.isInfoEnabled()) + log.info(record); + + changeSet.put(record.getStatement(), record); + + } + + /** + * See {@link IChangeLog#transactionCommited()}. + */ + public synchronized void transactionCommited() { + + if (log.isInfoEnabled()) + log.info("transaction committed"); + + committed.clear(); + + committed.putAll(changeSet); + + changeSet.clear(); + + } + + /** + * See {@link IChangeLog#transactionAborted()}. 
+ */ + public synchronized void transactionAborted() { + + if (log.isInfoEnabled()) + log.info("transaction aborted"); + + changeSet.clear(); + + } + + /** + * Return the change set as of the last commmit point. + * + * @return + * a collection of {@link IChangeRecord}s as of the last commit + * point + */ + public Collection<IChangeRecord> getLastCommit() { + + return committed.values(); + + } + + /** + * Return the change set as of the last commmit point, using the supplied + * database to resolve ISPOs to BigdataStatements. + * + * @return + * a collection of {@link IChangeRecord}s as of the last commit + * point + */ + public Collection<IChangeRecord> getLastCommit(final AbstractTripleStore db) { + + return resolve(db, committed.values()); + + } + + /** + * Use the supplied database to turn a set of ISPO change records into + * BigdataStatement change records. BigdataStatements also implement + * ISPO, the difference being that BigdataStatements also contain + * materialized RDF terms for the 3 (or 4) positions, in addition to just + * the internal identifiers (IVs) for those terms. + * + * @param db + * the database containing the lexicon needed to materialize + * the BigdataStatement objects + * @param unresolved + * the ISP... [truncated message content] |