|
From: <tho...@us...> - 2010-09-28 12:43:46
|
Revision: 3658
http://bigdata.svn.sourceforge.net/bigdata/?rev=3658&view=rev
Author: thompsonbry
Date: 2010-09-28 12:43:35 +0000 (Tue, 28 Sep 2010)
Log Message:
-----------
Merge trunk to branch [r3423:3657] for QUADS_QUERY_BRANCH.
Resolved conflicts in
build.xml (ctc_utils edit was reapplied in one section of the file).
AbstractEmbeddedFederationTestCase (accepted change were some fields were changed from public to private).
Modified Paths:
--------------
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/cache/RingBuffer.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/BufferMode.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/mdi/PartitionLocator.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/config/NicUtil.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/cache/TestRingBuffer.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/concurrent/StressTestNonBlockingLockManagerWithTxDag.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransactionService.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/AbstractEmbeddedFederationTestCase.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestMove.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java
branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java
branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestOptionals.java
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/samples/com/bigdata/samples/fastload.properties
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoins.java
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoinsWithoutInlining.java
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java
branches/QUADS_QUERY_BRANCH/build.xml
branches/QUADS_QUERY_BRANCH/src/resources/config/bigdataCluster.config
branches/QUADS_QUERY_BRANCH/src/resources/config/bigdataCluster16.config
branches/QUADS_QUERY_BRANCH/src/resources/config/bigdataStandalone.config
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/cache/RingBuffer.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/cache/RingBuffer.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/cache/RingBuffer.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -154,7 +154,7 @@
public boolean add(final T ref) throws IllegalStateException {
if (ref == null)
- throw new IllegalArgumentException();
+ throw new NullPointerException();
beforeOffer( ref );
@@ -178,7 +178,7 @@
public boolean offer(final T ref) {
if (ref == null)
- throw new IllegalArgumentException();
+ throw new NullPointerException();
beforeOffer( ref );
@@ -387,12 +387,12 @@
if (index < 0 || index >= size)
throw new IllegalArgumentException();
- if (index + 1 == size) {
-
- // remove the LRU position.
- return remove();
-
- }
+// if (index + 1 == size) {
+//
+// // remove the LRU position.
+// return remove();
+//
+// }
/*
* Otherwise we are removing some non-LRU element.
@@ -409,7 +409,7 @@
for (;;) {
- int nexti = (i + 1) % capacity; // update index.
+ final int nexti = (i + 1) % capacity; // update index.
if (nexti != head) {
@@ -491,10 +491,9 @@
*/
final public boolean scanHead(final int nscan, final T ref) {
- assert nscan > 0;
-// if (nscan <= 0)
-// throw new IllegalArgumentException();
-//
+ if (nscan <= 0)
+ throw new IllegalArgumentException();
+
if (ref == null)
throw new IllegalArgumentException();
@@ -581,6 +580,9 @@
public boolean contains(final Object ref) {
+ if (ref == null)
+ throw new NullPointerException();
+
// MRU to LRU scan.
for (int n = 0, i = tail; n < size; n++) {
@@ -601,7 +603,8 @@
throw new NullPointerException();
if (c == this)
- throw new IllegalArgumentException();
+ return true;
+// throw new IllegalArgumentException();
for( Object e : c ) {
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/BufferMode.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/BufferMode.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/BufferMode.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -59,15 +59,16 @@
Transient(false/* stable */, true/* fullyBuffered */),
/**
+ * <strong>This mode is not being actively developed and should not be used
+ * outside of unit tests.</strong>
* <p>
- * A direct buffer is allocated for the file image. Writes are applied
- * to the buffer. The buffer tracks dirty slots regardless of the
- * transaction that wrote them and periodically writes dirty slots
- * through to disk. On commit, any dirty index or allocation nodes are
- * written onto the buffer and all dirty slots on the buffer. Dirty
- * slots in the buffer are then synchronously written to disk, the
- * appropriate root block is updated, and the file is (optionally)
- * flushed to disk.
+ * A direct buffer is allocated for the file image. Writes are applied to
+ * the buffer. The buffer tracks dirty slots regardless of the transaction
+ * that wrote them and periodically writes dirty slots through to disk. On
+ * commit, any dirty index or allocation nodes are written onto the buffer
+ * and all dirty slots on the buffer. Dirty slots in the buffer are then
+ * synchronously written to disk, the appropriate root block is updated, and
+ * the file is (optionally) flushed to disk.
* </p>
* <p>
* This option offers wires an image of the journal file into memory and
@@ -79,6 +80,9 @@
Direct(true/* stable */, true/* fullyBuffered */),
/**
+ * <strong>This mode is not being actively developed and should not be used
+ * outside of unit tests. Memory mapped IO has the fatal weakness under Java
+ * that you can not reliably close or extend the backing file.</strong>
* <p>
* A memory-mapped buffer is allocated for the file image. Writes are
* applied to the buffer. Reads read from the buffer. On commit, the map is
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/mdi/PartitionLocator.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/mdi/PartitionLocator.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/mdi/PartitionLocator.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -185,8 +185,17 @@
}
- // Note: used by assertEquals in the test cases.
- public boolean equals(Object o) {
+ /*
+ * @todo There are some unit tests which depend on this implementation of
+ * equals. However, since the partition locator Id for a given scale out
+ * index SHOULD be immutable, running code can rely on partitionId ==
+ * o.partitionId. Therefore the unit tests should be modified to extract an
+ * "assertSamePartitionLocator" method and rely on that. We could then
+ * simplify this method to just test the partitionId. That would reduce the
+ * effort when maintaining hash tables based on the PartitionLocator since
+ * we would not be comparing the keys, UUIDs, etc.
+ */
+ public boolean equals(final Object o) {
if (this == o)
return true;
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -632,7 +632,7 @@
if (vmd.mergePriority > 0 || forceCompactingMerges) {
- if(forceCompactingMerges && vmd.getAction().equals(OverflowActionEnum.Copy)) {
+ if(forceCompactingMerges && OverflowActionEnum.Copy.equals(vmd.getAction())) {
vmd.clearCopyAction();
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/config/NicUtil.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/config/NicUtil.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/config/NicUtil.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -26,28 +26,20 @@
package com.bigdata.util.config;
import java.io.IOException;
-import java.net.InetAddress;
import java.net.Inet4Address;
+import java.net.InetAddress;
import java.net.InterfaceAddress;
-import java.net.MalformedURLException;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;
+import java.util.Collections;
+import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Enumeration;
-import java.util.Collections;
-import java.util.logging.LogRecord;
import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import net.jini.config.Configuration;
-import net.jini.config.ConfigurationException;
-import com.sun.jini.config.Config;
-import com.sun.jini.logging.Levels;
-
/**
* Utility class that provides a set of static convenience methods
* related to processing information about the current node's Network
@@ -400,34 +392,34 @@
return macAddr;
}
- /**
- * Three-argument version of <code>getInetAddress</code> that retrieves
- * the desired interface name from the given <code>Configuration</code>
- * parameter.
- */
- public static InetAddress getInetAddress(Configuration config,
- String componentName,
- String nicNameEntry)
- {
- String nicName = "NoNetworkInterfaceName";
- try {
- nicName = (String)Config.getNonNullEntry(config,
- componentName,
- nicNameEntry,
- String.class,
- "eth0");
- } catch(ConfigurationException e) {
- jiniConfigLogger.log(WARNING, e
- +" - [componentName="+componentName
- +", nicNameEntry="+nicNameEntry+"]");
- utilLogger.log(Level.WARN, e
- +" - [componentName="+componentName
- +", nicNameEntry="+nicNameEntry+"]");
- e.printStackTrace();
- return null;
- }
- return ( getInetAddress(nicName, 0, null, false) );
- }
+// /**
+// * Three-argument version of <code>getInetAddress</code> that retrieves
+// * the desired interface name from the given <code>Configuration</code>
+// * parameter.
+// */
+// public static InetAddress getInetAddress(Configuration config,
+// String componentName,
+// String nicNameEntry)
+// {
+// String nicName = "NoNetworkInterfaceName";
+// try {
+// nicName = (String)Config.getNonNullEntry(config,
+// componentName,
+// nicNameEntry,
+// String.class,
+// "eth0");
+// } catch(ConfigurationException e) {
+// jiniConfigLogger.log(WARNING, e
+// +" - [componentName="+componentName
+// +", nicNameEntry="+nicNameEntry+"]");
+// utilLogger.log(Level.WARN, e
+// +" - [componentName="+componentName
+// +", nicNameEntry="+nicNameEntry+"]");
+// e.printStackTrace();
+// return null;
+// }
+// return ( getInetAddress(nicName, 0, null, false) );
+// }
// What follows are a number of versions of the getIpAddress method
// provided for convenience.
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/cache/TestRingBuffer.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/cache/TestRingBuffer.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/cache/TestRingBuffer.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -28,9 +28,7 @@
package com.bigdata.cache;
-import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
@@ -65,7 +63,7 @@
public void test_ctor() {
try {
- new RingBuffer(0);
+ new RingBuffer<String>(0);
fail("Expecting: " + IllegalArgumentException.class);
} catch (IllegalArgumentException ex) {
if (log.isInfoEnabled())
@@ -73,14 +71,14 @@
}
try {
- new RingBuffer(-1);
+ new RingBuffer<String>(-1);
fail("Expecting: " + IllegalArgumentException.class);
} catch (IllegalArgumentException ex) {
if (log.isInfoEnabled())
log.info("Ignoring excepted exception: " + ex);
}
- final RingBuffer b = new RingBuffer(1);
+ final RingBuffer<String> b = new RingBuffer<String>(1);
assertEquals("capacity", 1, b.capacity());
assertEquals("size", 0, b.size());
@@ -304,8 +302,6 @@
* remove(0) : [ _, _, _ ] : head=0; tail=0; size=0, returns [c] (empty, head==tail)
* </pre>
*
- * @todo must also test when remove not at the tail!
- *
* When removing the tail, head := (head-1) % capacity.
*/
public void test_removeNth() {
@@ -313,7 +309,7 @@
final String a = "a";
final String b = "b";
final String c = "c";
- final String d = "d";
+// final String d = "d";
final RingBuffer<String> buffer = new RingBuffer<String>(3);
@@ -429,8 +425,8 @@
try {
buffer.add(null);
- fail("Expecting: " + IllegalArgumentException.class);
- } catch (IllegalArgumentException ex) {
+ fail("Expecting: " + NullPointerException.class);
+ } catch (NullPointerException ex) {
if (log.isInfoEnabled())
log.info("Ignoring expected exception: " + ex);
}
@@ -442,8 +438,8 @@
try {
buffer.offer(null);
- fail("Expecting: " + IllegalArgumentException.class);
- } catch (IllegalArgumentException ex) {
+ fail("Expecting: " + NullPointerException.class);
+ } catch (NullPointerException ex) {
if (log.isInfoEnabled())
log.info("Ignoring expected exception: " + ex);
}
@@ -619,9 +615,9 @@
public void test_toArray1_nonempty() {
Object [] intArr = new Object[] {
- new Integer(1),
- new Integer(2),
- new Integer(3)
+ Integer.valueOf(1),
+ Integer.valueOf(2),
+ Integer.valueOf(3)
};
final RingBuffer<Object> buffer = new RingBuffer<Object>(intArr.length);
buffer.addAll(Arrays.asList(intArr));
@@ -631,9 +627,9 @@
public void test_toArray1_nonempty_oversized() {
Object [] intArr = new Object[] {
- new Integer(1),
- new Integer(2),
- new Integer(3)
+ Integer.valueOf(1),
+ Integer.valueOf(2),
+ Integer.valueOf(3)
};
final RingBuffer<Object> buffer = new RingBuffer<Object>(intArr.length);
buffer.addAll(Arrays.asList(intArr));
@@ -685,7 +681,7 @@
// see https://sourceforge.net/apps/trac/bigdata/ticket/101
public void test_remove_get_order() {
- String[] expected = new String[] {
+ final String[] expected = new String[] {
"a", "b", "c", "d"
};
final RingBuffer<String> b = new RingBuffer<String>(expected.length);
@@ -698,8 +694,8 @@
//Remove entries in MRU to LRU order -- differs from javadoc order
for (int i=(expected.length-1); i >= 0; i--) {
- String getString = b.get(i);
- String removeString = b.remove(i);
+ final String getString = b.get(i);
+ final String removeString = b.remove(i);
assertSame(getString, removeString);
}
assertTrue(b.isEmpty());
@@ -973,13 +969,10 @@
assertTrue(b.contains("c"));
}
- //TODO - check for exception on contains(null) once implemented
-
-
- public void test_contains_all_null() {
- final RingBuffer<String> b = new RingBuffer<String>(1);
+ public void test_contains_null() {
+ final RingBuffer<String> b = new RingBuffer<String>(1);
try {
- b.containsAll(null);
+ b.contains(null);
fail("Expecting: " + NullPointerException.class);
} catch (NullPointerException ex) {
if (log.isInfoEnabled())
@@ -987,16 +980,29 @@
}
}
- public void test_contains_all_this() {
+ public void test_contains_all_null() {
final RingBuffer<String> b = new RingBuffer<String>(1);
try {
- b.containsAll(b);
- fail("Expecting: " + IllegalArgumentException.class);
- } catch (IllegalArgumentException ex) {
+ b.containsAll(null);
+ fail("Expecting: " + NullPointerException.class);
+ } catch (NullPointerException ex) {
if (log.isInfoEnabled())
log.info("Ignoring excepted exception: " + ex);
}
}
+
+ public void test_contains_all_this() {
+ final RingBuffer<String> b = new RingBuffer<String>(1);
+ // Note: This is a tautology.
+ assertTrue(b.containsAll(b));
+// try {
+// b.containsAll(b);
+// fail("Expecting: " + IllegalArgumentException.class);
+// } catch (IllegalArgumentException ex) {
+// if (log.isInfoEnabled())
+// log.info("Ignoring excepted exception: " + ex);
+// }
+ }
public void test_contains_all_empty() {
final RingBuffer<String> b = new RingBuffer<String>(1);
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/concurrent/StressTestNonBlockingLockManagerWithTxDag.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/concurrent/StressTestNonBlockingLockManagerWithTxDag.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/concurrent/StressTestNonBlockingLockManagerWithTxDag.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -234,7 +234,7 @@
final int nhorriddeath = Integer.parseInt(result.get("nhorriddeath"));
- // all tasks were either successfull or a died a horrid death.
+ // all tasks were either successful or a died a horrid death.
assertEquals(ntasks, nsuccess + nhorriddeath);
/*
@@ -243,9 +243,14 @@
* scheduled to die is random.
*/
final double actualErrorRate = nhorriddeath / (double) ntasks;
-
+
+ /*
+ * Note: I've increased the upper bound on the allowed error rate a bit
+ * since the CI builds were occasionally failing this with an actual
+ * error rate which was quite reasonable, e.g., .16.
+ */
if ((actualErrorRate < expectedErrorRate - .05)
- || (actualErrorRate > expectedErrorRate + .05)) {
+ || (actualErrorRate > expectedErrorRate + .1)) {
fail("error rate: expected=" + expectedErrorRate + ", actual="
+ actualErrorRate);
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -93,7 +93,25 @@
suite.addTest( TestTransientJournal.suite() );
- suite.addTest( TestDirectJournal.suite() );
+ /*
+ * Commented out since this mode is not used and there is an occasional
+ * test failure in:
+ *
+ * com.bigdata.journal.TestConcurrentJournal.test_concurrentReadersAreOk
+ *
+ * This error is stochastic and appears to be restricted to
+ * BufferMode#Direct. This is a journal mode based by a fixed capacity
+ * native ByteBuffer serving as a write through cache to the disk. Since
+ * the buffer can not be extended, that journal mode is not being
+ * excercised by anything. If you like, I can deprecate the Direct
+ * BufferMode and turn disable its test suite. (There is also a "Mapped"
+ * BufferMode whose tests we are not running due to problems with Java
+ * releasing native heap ByteBuffers and closing memory mapped files.
+ * Its use is strongly discouraged in the javadoc, but it has not been
+ * excised from the code since it might be appropriate for some
+ * applications.)
+ */
+// suite.addTest( TestDirectJournal.suite() );
/*
* Note: The mapped journal is somewhat problematic and its tests are
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransactionService.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransactionService.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransactionService.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -40,6 +40,7 @@
import com.bigdata.service.AbstractTransactionService;
import com.bigdata.service.CommitTimeIndex;
import com.bigdata.service.TxServiceRunState;
+import com.bigdata.util.MillisecondTimestampFactory;
/**
* Unit tests of the {@link AbstractTransactionService} using a mock client.
@@ -259,6 +260,24 @@
}
+ /**
+ * FIXME This currently waits until at least two milliseconds have
+ * elapsed. This is a workaround for
+ * {@link TestTransactionService#test_newTx_readOnly()} until <a href=
+ * "https://sourceforge.net/apps/trac/bigdata/ticket/145" >ISSUE#145
+ * </a> is resolved. This override of {@link #nextTimestamp()} should
+ * be removed once that issue is fixed.
+ */
+ @Override
+ public long nextTimestamp() {
+
+ // skip at least one millisecond.
+ MillisecondTimestampFactory.nextMillis();
+
+ return MillisecondTimestampFactory.nextMillis();
+
+ }
+
}
/**
@@ -596,17 +615,25 @@
* GT the lastCommitTime since that could allow data not yet committed to
* become visible during the transaction (breaking isolation).
* <p>
- * A commitTime is identified by looking up the callers timestamp in a log of
- * the historical commit times and returning the first historical commit
+ * A commitTime is identified by looking up the callers timestamp in a log
+ * of the historical commit times and returning the first historical commit
* time LTE the callers timestamp.
* <p>
* The transaction start time is then chosen from the half-open interval
* <i>commitTime</i> (inclusive lower bound) : <i>nextCommitTime</i>
* (exclusive upper bound).
*
- * @throws IOException
+ * @throws IOException
*
- * @todo This test fails occasionally. I have not figured out why yet. BBT
+ * @todo This test fails occasionally. This occurs if the timestamps
+ * assigned by the {@link MockTransactionService} are only 1 unit
+ * apart. When that happens, there are not enough distinct values
+ * available to allow 2 concurrent read-only transactions. See <a
+ * href=
+ * "https://sourceforge.net/apps/trac/bigdata/ticket/145">ISSUE#145
+ * </a>. Also see {@link MockTransactionService#nextTimestamp()}
+ * which has been overridden to guarantee that there are at least
+ * two distinct values such that this test will pass.
*/
public void test_newTx_readOnly() throws IOException {
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/AbstractEmbeddedFederationTestCase.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/AbstractEmbeddedFederationTestCase.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/AbstractEmbeddedFederationTestCase.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -78,11 +78,11 @@
super(arg0);
}
- public IBigdataClient<?> client;
- public IBigdataFederation<?> fed;
- public IMetadataService metadataService;
- public IDataService dataService0;
- public IDataService dataService1;
+ protected IBigdataClient<?> client;
+ protected IBigdataFederation<?> fed;
+ protected IMetadataService metadataService;
+ protected IDataService dataService0;
+ protected IDataService dataService1;
public Properties getProperties() {
@@ -356,9 +356,10 @@
* FIXME You can change this constant if you are debugging so that
* the test will not terminate too soon, but change it back so that
* the test will terminate quickly when run automatically. The value
- * should be [2000] ms.
+ * should be only a few seconds. 2000 ms is sometimes to little, so
+ * I have raised this value to 5000 ms.
*/
- if (elapsed > 2000) {
+ if (elapsed > 5000) {
fail("No overflow after " + elapsed + "ms?");
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestMove.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestMove.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestMove.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -360,7 +360,7 @@
* Set flag to force overflow on group commit.
*/
dataService0
- .forceOverflow(false/* immediate */, false/* compactingMerge */);
+ .forceOverflow(false/* immediate */, true/* compactingMerge */);
// insert the data into the scale-out index.
fed.getIndex(name, ITx.UNISOLATED)
@@ -395,7 +395,7 @@
int ndataService0 = 0;// #of index partitions on data service 0.
int ndataService1 = 0;// #of index partitions on data service 1.
- final ITupleIterator itr = new RawDataServiceTupleIterator(
+ final ITupleIterator<?> itr = new RawDataServiceTupleIterator(
fed.getMetadataService(),//
MetadataService.getMetadataIndexName(name), //
ITx.READ_COMMITTED,//
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -194,20 +194,34 @@
*
* @throws InterruptedException
* @throws ExecutionException
+ *
+ * @todo This test now logs a warning rather than failing pending resolution
+ * of https://sourceforge.net/apps/trac/bigdata/ticket/147
*/
public void test_stress_startWriteStop2() throws InterruptedException,
ExecutionException {
- for (int i = 0; i < 10000; i++) {
+ final int LIMIT = 10000;
+ int nerr = 0;
+ for (int i = 0; i < LIMIT; i++) {
try {
doStartWriteStop2Test();
} catch (Throwable t) {
- fail("Pass#=" + i, t);
+ // fail("Pass#=" + i, t);
+ log.warn("Would have failed: pass#=" + i + ", cause=" + t);
+ nerr++;
}
}
+ if (nerr > 0) {
+
+ log.error("Test would have failed: nerrs=" + nerr + " out of "
+ + LIMIT + " trials");
+
+ }
+
}
/**
Modified: branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -34,7 +34,6 @@
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
-import java.net.InetAddress;
import java.util.Arrays;
import java.util.Date;
import java.util.Enumeration;
@@ -131,8 +130,6 @@
public final Properties properties;
public final String[] jiniOptions;
- private final String serviceIpAddr;
-
protected void toString(StringBuilder sb) {
super.toString(sb);
@@ -178,12 +175,6 @@
} else {
log.warn("groups = " + Arrays.toString(this.groups));
}
-
- try {
- this.serviceIpAddr = NicUtil.getIpAddress("default.nic", "default", false);
- } catch(IOException e) {
- throw new ConfigurationException(e.getMessage(), e);
- }
}
/**
@@ -480,6 +471,9 @@
final ServiceDir serviceDir = new ServiceDir(this.serviceDir);
+ String serviceIpAddr = NicUtil.getIpAddress ( "default.nic", "default", false ) ;
+ if ( null == serviceIpAddr )
+ throw new IOException ( "Can't get a host ip address" ) ;
final Hostname hostName = new Hostname(serviceIpAddr);
final ServiceUUID serviceUUID = new ServiceUUID(this.serviceUUID);
Modified: branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -85,7 +85,7 @@
hostname = NicUtil.getIpAddress("default.nic", "default", false);
} catch(Throwable t) {//for now, maintain same failure logic as used previously
t.printStackTrace();
- s = NicUtil.getIpAddressByLocalHost();
+ hostname = NicUtil.getIpAddressByLocalHost();
}
QUERY_TEST_RESULT_FILE = hostname + "-result.txt";
} else {
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -272,7 +272,18 @@
//
// /** {@value #DEFAULT_MAX_TRIES} */
// int DEFAULT_MAX_TRIES = 3;
-
+
+ /**
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
+ */
+ String DEFAULT_GRAPH = "defaultGraph" ;
+
+ /**
+ * TODO Should we always enforce a real value? i.e. provide a real default
+ * or abort the load.
+ */
+ String DEFAULT_DEFAULT_GRAPH = null ;
}
/**
@@ -402,6 +413,12 @@
private transient RDFFormat rdfFormat;
/**
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
+ */
+ public final String defaultGraph ;
+
+ /**
* Force the load of the NxParser integration class and its registration
* of the NQuadsParser#nquads RDFFormat.
*
@@ -496,6 +513,8 @@
sb.append(", " + ConfigurationOptions.RDF_FORMAT + "=" + rdfFormat);
+ sb.append(", " + ConfigurationOptions.DEFAULT_GRAPH + "=" + defaultGraph) ;
+
sb.append(", " + ConfigurationOptions.FORCE_OVERFLOW_BEFORE_CLOSURE + "="
+ forceOverflowBeforeClosure);
@@ -601,6 +620,10 @@
}
+ defaultGraph = (String) config.getEntry(component,
+ ConfigurationOptions.DEFAULT_GRAPH, String.class,
+ ConfigurationOptions.DEFAULT_DEFAULT_GRAPH);
+
rejectedExecutionDelay = (Long) config.getEntry(
component,
ConfigurationOptions.REJECTED_EXECUTION_DELAY, Long.TYPE,
@@ -979,6 +1002,7 @@
jobState.ontology,//file
jobState.ontology.getPath(),//baseURI
jobState.getRDFFormat(),//
+ jobState.defaultGraph,
jobState.ontologyFileFilter //
);
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -223,6 +223,7 @@
jobState.valuesInitialCapacity,//
jobState.bnodesInitialCapacity,//
jobState.getRDFFormat(), //
+ jobState.defaultGraph,
parserOptions,//
false, // deleteAfter is handled by the master!
jobState.parserPoolSize, //
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -186,7 +186,7 @@
// run the parser.
// @todo reuse the same underlying parser instance?
- loader.loadRdf(reader, baseURL, rdfFormat, parserOptions);
+ loader.loadRdf(reader, baseURL, rdfFormat, null, parserOptions);
success = true;
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -356,8 +356,14 @@
* The default {@link RDFFormat}.
*/
private final RDFFormat defaultFormat;
-
+
/**
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
+ */
+ private final String defaultGraph;
+
+ /**
* Options for the {@link RDFParser}.
*/
private final RDFParserOptions parserOptions;
@@ -1423,7 +1429,7 @@
try {
// run the parser.
new PresortRioLoader(buffer).loadRdf(reader, baseURL,
- rdfFormat, parserOptions);
+ rdfFormat, defaultGraph, parserOptions);
} finally {
reader.close();
}
@@ -1490,6 +1496,9 @@
* {@link BNode}s parsed from a single document.
* @param defaultFormat
* The default {@link RDFFormat} which will be assumed.
+ * @param defaultGraph
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
* @param parserOptions
* Options for the {@link RDFParser}.
* @param deleteAfter
@@ -1529,6 +1538,7 @@
final int valuesInitialCapacity,//
final int bnodesInitialCapacity, //
final RDFFormat defaultFormat,//
+ final String defaultGraph,//
final RDFParserOptions parserOptions,//
final boolean deleteAfter,//
final int parserPoolSize,//
@@ -1566,6 +1576,8 @@
this.defaultFormat = defaultFormat;
+ this.defaultGraph = defaultGraph;
+
this.parserOptions = parserOptions;
this.deleteAfter = deleteAfter;
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -37,6 +37,8 @@
import org.openrdf.rio.RDFParser;
import org.openrdf.rio.Rio;
+import com.bigdata.rdf.model.BigdataURI;
+
/**
* Parses data but does not load it into the indices.
*
@@ -74,6 +76,8 @@
private final ValueFactory valueFactory;
+ protected String defaultGraph;
+
public BasicRioLoader(final ValueFactory valueFactory) {
if (valueFactory == null)
@@ -153,18 +157,20 @@
}
final public void loadRdf(final InputStream is, final String baseURI,
- final RDFFormat rdfFormat, final RDFParserOptions options)
+ final RDFFormat rdfFormat, final String defaultGraph,
+ final RDFParserOptions options)
throws Exception {
- loadRdf2(is, baseURI, rdfFormat, options);
+ loadRdf2(is, baseURI, rdfFormat, defaultGraph, options);
}
final public void loadRdf(final Reader reader, final String baseURI,
- final RDFFormat rdfFormat, final RDFParserOptions options)
+ final RDFFormat rdfFormat, final String defaultGraph,
+ final RDFParserOptions options)
throws Exception {
- loadRdf2(reader, baseURI, rdfFormat, options);
+ loadRdf2(reader, baseURI, rdfFormat, defaultGraph, options);
}
@@ -180,7 +186,7 @@
* @throws Exception
*/
protected void loadRdf2(final Object source, final String baseURI,
- final RDFFormat rdfFormat, final RDFParserOptions options)
+ final RDFFormat rdfFormat, final String defaultGraph, final RDFParserOptions options)
throws Exception {
if (source == null)
@@ -198,6 +204,8 @@
if (log.isInfoEnabled())
log.info("format=" + rdfFormat + ", options=" + options);
+ this.defaultGraph = defaultGraph ;
+
final RDFParser parser = getParser(rdfFormat);
// apply options to the parser
@@ -212,7 +220,7 @@
// Note: reset so that rates are correct for each source loaded.
stmtsAdded = 0;
-
+
try {
before();
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -72,12 +72,14 @@
* The base URL for those data.
* @param rdfFormat
* The interchange format.
+ * @param defaultGraph
+ * The default graph.
* @param options
* Options to be applied to the {@link RDFParser}.
* @throws Exception
*/
public void loadRdf(Reader reader, String baseURL, RDFFormat rdfFormat,
- RDFParserOptions options) throws Exception;
+ String defaultGraph, RDFParserOptions options) throws Exception;
/**
* Parse RDF data.
@@ -88,11 +90,13 @@
* The base URL for those data.
* @param rdfFormat
* The interchange format.
+ * @param defaultGraph
+ * The default graph.
* @param options
* Options to be applied to the {@link RDFParser}.
* @throws Exception
*/
public void loadRdf(InputStream is, String baseURI, RDFFormat rdfFormat,
- RDFParserOptions options) throws Exception;
+ String defaultGraph, RDFParserOptions options) throws Exception;
}
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -23,11 +23,14 @@
*/
package com.bigdata.rdf.rio;
+import org.openrdf.model.Resource;
import org.openrdf.model.Statement;
import org.openrdf.model.Value;
import org.openrdf.rio.RDFHandler;
import org.openrdf.rio.RDFHandlerException;
+import com.bigdata.rdf.model.BigdataURI;
+
/**
* Statement handler for the RIO RDF Parser that writes on a
* {@link StatementBuffer}.
@@ -45,6 +48,12 @@
final protected IStatementBuffer<?> buffer;
/**
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
+ */
+ private BigdataURI defaultGraphURI = null ;
+
+ /**
* Sets up parser to load RDF.
*
* @param buffer
@@ -58,7 +67,7 @@
this.buffer = buffer;
}
-
+
/**
* bulk insert the buffered data into the store.
*/
@@ -87,8 +96,11 @@
public RDFHandler newRDFHandler() {
+ defaultGraphURI = null != defaultGraph && 4 == buffer.getDatabase ().getSPOKeyArity ()
+ ? buffer.getDatabase ().getValueFactory ().createURI ( defaultGraph )
+ : null
+ ;
return this;
-
}
public void handleStatement( final Statement stmt ) {
@@ -98,9 +110,13 @@
log.debug(stmt);
}
-
+
+ Resource graph = stmt.getContext() ;
+ if ( null == graph
+ && null != defaultGraphURI ) // only true when we know we are loading a quad store
+ graph = defaultGraphURI ;
// buffer the write (handles overflow).
- buffer.add( stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), stmt.getContext() );
+ buffer.add( stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), graph );
stmtsAdded++;
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -558,11 +558,18 @@
final int p = this.p.hashCode();
final int o = this.o.hashCode();
-
- // Note: historical behavior was (s,p,o) based hash.
- hashCode = 961 * ((int) (s ^ (s >>> 32))) + 31
- * ((int) (p ^ (p >>> 32))) + ((int) (o ^ (o >>> 32)));
+ /*
+ * Note: The historical behavior was based on the int64 term
+ * identifiers. Since the hash code is now computed from the int32
+ * hash codes of the (s,p,o) IV objects, the original bit math was
+ * resulting in a hash code which was always zero (any 32 bit value
+ * shifted right by 32 bits is zero).
+ */
+ hashCode = 961 * s + 31 * p + o;
+// hashCode = 961 * ((int) (s ^ (s >>> 32))) + 31
+// * ((int) (p ^ (p >>> 32))) + ((int) (o ^ (o >>> 32)));
+
}
return hashCode;
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -640,7 +640,7 @@
final LoadStats totals = new LoadStats();
- loadData3(totals, reader, baseURL, rdfFormat, true/*endOfBatch*/);
+ loadData3(totals, reader, baseURL, rdfFormat, null, true/*endOfBatch*/);
return totals;
@@ -668,7 +668,7 @@
final LoadStats totals = new LoadStats();
- loadData3(totals, is, baseURL, rdfFormat, true/* endOfBatch */);
+ loadData3(totals, is, baseURL, rdfFormat, null, true/* endOfBatch */);
return totals;
@@ -704,7 +704,7 @@
final LoadStats totals = new LoadStats();
- loadData3(totals, is, baseURL, rdfFormat, true/*endOfBatch*/);
+ loadData3(totals, is, baseURL, rdfFormat, null, true/*endOfBatch*/);
return totals;
@@ -762,7 +762,7 @@
if(file.exists()) {
loadFiles(totals, 0/* depth */, file, baseURL,
- rdfFormat, filter, endOfBatch);
+ rdfFormat, null, filter, endOfBatch);
return;
@@ -789,7 +789,7 @@
try {
- loadData3(totals, reader, baseURL, rdfFormat, endOfBatch);
+ loadData3(totals, reader, baseURL, rdfFormat, null, endOfBatch);
} catch (Exception ex) {
@@ -817,6 +817,9 @@
* The format of the file (optional, when not specified the
* format is deduced for each file in turn using the
* {@link RDFFormat} static methods).
+ * @param defaultGraph
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
* @param filter
* A filter selecting the file names that will be loaded
* (optional). When specified, the filter MUST accept directories
@@ -827,7 +830,8 @@
* @throws IOException
*/
public LoadStats loadFiles(final File file, final String baseURI,
- final RDFFormat rdfFormat, final FilenameFilter filter)
+ final RDFFormat rdfFormat, final String defaultGraph,
+ final FilenameFilter filter)
throws IOException {
if (file == null)
@@ -835,7 +839,7 @@
final LoadStats totals = new LoadStats();
- loadFiles(totals, 0/* depth */, file, baseURI, rdfFormat, filter, true/* endOfBatch */
+ loadFiles(totals, 0/* depth */, file, baseURI, rdfFormat, defaultGraph, filter, true/* endOfBatch */
);
return totals;
@@ -844,7 +848,8 @@
protected void loadFiles(final LoadStats totals, final int depth,
final File file, final String baseURI, final RDFFormat rdfFormat,
- final FilenameFilter filter, final boolean endOfBatch)
+ final String defaultGraph, final FilenameFilter filter,
+ final boolean endOfBatch)
throws IOException {
if (file.isDirectory()) {
@@ -864,7 +869,7 @@
// final RDFFormat fmt = RDFFormat.forFileName(f.toString(),
// rdfFormat);
- loadFiles(totals, depth + 1, f, baseURI, rdfFormat, filter,
+ loadFiles(totals, depth + 1, f, baseURI, rdfFormat, defaultGraph, filter,
(depth == 0 && i < files.length ? false : endOfBatch));
}
@@ -919,7 +924,7 @@
final String s = baseURI != null ? baseURI : file.toURI()
.toString();
- loadData3(totals, reader, s, fmt, endOfBatch);
+ loadData3(totals, reader, s, fmt, defaultGraph, endOfBatch);
return;
@@ -955,7 +960,7 @@
*/
protected void loadData3(final LoadStats totals, final Object source,
final String baseURL, final RDFFormat rdfFormat,
- final boolean endOfBatch) throws IOException {
+ final String defaultGraph, final boolean endOfBatch) throws IOException {
final long begin = System.currentTimeMillis();
@@ -978,11 +983,10 @@
}
// Setup the loader.
- final PresortRioLoader loader = new PresortRioLoader(buffer);
+ final PresortRioLoader loader = new PresortRioLoader ( buffer ) ;
// @todo review: disable auto-flush - caller will handle flush of the buffer.
// loader.setFlush(false);
-
// add listener to log progress.
loader.addRioLoaderListener( new RioLoaderListener() {
@@ -1006,12 +1010,12 @@
if(source instanceof Reader) {
- loader.loadRdf((Reader) source, baseURL, rdfFormat, parserOptions);
+ loader.loadRdf((Reader) source, baseURL, rdfFormat, defaultGraph, parserOptions);
} else if (source instanceof InputStream) {
loader.loadRdf((InputStream) source, baseURL, rdfFormat,
- parserOptions);
+ defaultGraph, parserOptions);
} else
throw new AssertionError();
@@ -1360,7 +1364,7 @@
// rdfFormat, filter);
dataLoader.loadFiles(totals, 0/* depth */, fileOrDir, baseURI,
- rdfFormat, filter, true/* endOfBatch */
+ rdfFormat, null, filter, true/* endOfBatch */
);
}
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -714,7 +714,7 @@
try {
// run the parser.
new MyLoader(buffer).loadRdf(reader, baseURL,
- defaultRDFFormat, s.parserOptions);
+ defaultRDFFormat, null, s.parserOptions);
} finally {
reader.close();
}
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -401,7 +401,7 @@
});
- loader.loadRdf((Reader) reader, baseURI, rdfFormat, options);
+ loader.loadRdf((Reader) reader, baseURI, rdfFormat, null, options);
if (log.isInfoEnabled())
log.info("Done: " + resource);
@@ -681,7 +681,7 @@
loader.loadRdf(new BufferedReader(new InputStreamReader(
new FileInputStream(resource))), baseURI, rdfFormat,
- options);
+ null, options);
if(log.isInfoEnabled())
log.info("End of reparse: nerrors=" + nerrs + ", file="
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -161,6 +161,7 @@
valuesInitialCapacity,//
bnodesInitialCapacity,//
RDFFormat.RDFXML, // defaultFormat
+ null, // defaultGraph
parserOptions, // parserOptions
false, // deleteAfter
poolSize, // parserPoolSize,
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -400,6 +400,7 @@
valuesInitialCapacity,//
bnodesInitialCapacity,//
RDFFormat.RDFXML, // defaultFormat
+ null, // defaultGraph
parserOptions, //
false, // deleteAfter
parallel?5:1, // parserPoolSize,
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestOptionals.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestOptionals.java 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestOptionals.java 2010-09-28 12:43:35 UTC (rev 3658)
@@ -78,16 +78,16 @@
super(name);
}
- public void test_optionals_nextedSubquery()
- {
-
- final Properties p = new Properties(getProperties());
-
- p.setProperty(AbstractRelation.Options.NESTED_SUBQUERY, "true");
-
- doOptionalsTest(p);
-
- }
+// public void test_optionals_nextedSubquery()
+// {
+//
+// final Properties p = new Properties(getProperties());
+//
+// p.setProperty(AbstractRelation.Options.NESTED_SUBQUERY, "true");
+//
+// doOptionalsTest(p);
+//
+// }
public void test_optionals_pipeline()
{
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/samples/com/bigdata/samples/fastload.properties
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/samples/com/bigdata/samples/fastload.properties 2010-09-28 12:29:15 UTC (rev 3657)
+++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/samples/com/bigdata/samples/fastload.properties 2010-09-28 12:43:35 UTC (rev 3658)
@@ -1,7 +1,8 @@
-# Be very careful when you use this configuration! This turns off incremental
-# inference for load and retract, so you must explicitly force these operations,
-# which requires punching through the SAIL layer. Of course, if you are not
-# using inference then this is just the ticket and quite fast.
+# This configuration turns off incremental inference for load and retract, so
+# you must explicitly force these operations if you want to compute the closure
+# of the knowledge base. Forcing the closure requires punching through the SAIL
+# layer. Of course, if you are not using inference then this configuration is
+# just the ticket and is quite fast.
# set the initial and maximum extent o...
[truncated message content] |