From: <tho...@us...> - 2010-09-28 12:14:10
Revision: 3655
http://bigdata.svn.sourceforge.net/bigdata/?rev=3655&view=rev
Author: thompsonbry
Date: 2010-09-28 12:14:00 +0000 (Tue, 28 Sep 2010)
Log Message:
-----------
Merged trunk to branch [r3438:r3654]. No conflicts reported. JOURNAL_HA_BRANCH.
Modified Paths:
--------------
branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/cache/RingBuffer.java
branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/BufferMode.java
branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/util/config/NicUtil.java
branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/cache/TestRingBuffer.java
branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java
branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransactionService.java
branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java
branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestOptionals.java
branches/JOURNAL_HA_BRANCH/bigdata-sails/src/samples/com/bigdata/samples/fastload.properties
branches/JOURNAL_HA_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java
branches/JOURNAL_HA_BRANCH/build.xml
branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataCluster.config
branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataCluster16.config
branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataStandalone.config
Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/cache/RingBuffer.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/cache/RingBuffer.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/cache/RingBuffer.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -154,7 +154,7 @@
public boolean add(final T ref) throws IllegalStateException {
if (ref == null)
- throw new IllegalArgumentException();
+ throw new NullPointerException();
beforeOffer( ref );
@@ -178,7 +178,7 @@
public boolean offer(final T ref) {
if (ref == null)
- throw new IllegalArgumentException();
+ throw new NullPointerException();
beforeOffer( ref );
@@ -491,10 +491,9 @@
*/
final public boolean scanHead(final int nscan, final T ref) {
- assert nscan > 0;
-// if (nscan <= 0)
-// throw new IllegalArgumentException();
-//
+ if (nscan <= 0)
+ throw new IllegalArgumentException();
+
if (ref == null)
throw new IllegalArgumentException();
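[Reviewer note: the switch from IllegalArgumentException to NullPointerException above follows the java.util.Collection#add contract, which reserves NPE for rejected null elements. A minimal sketch of the pattern, assuming a plain array-backed ring buffer -- the field names here are illustrative, not RingBuffer's actual internals:

    // Minimal bounded ring buffer honoring the Collection null contract.
    class RingBufferSketch<T> {
        private final Object[] refs;
        private int tail, size;
        RingBufferSketch(final int capacity) { refs = new Object[capacity]; }
        public boolean add(final T ref) {
            if (ref == null)
                throw new NullPointerException(); // was IllegalArgumentException
            if (size == refs.length)
                throw new IllegalStateException(); // full: add() must throw
            refs[tail] = ref;
            tail = (tail + 1) % refs.length;
            size++;
            return true;
        }
    }
]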
Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/BufferMode.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/BufferMode.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/BufferMode.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -59,15 +59,16 @@
Transient(false/* stable */, true/* fullyBuffered */,StoreTypeEnum.WORM),
/**
+ * <strong>This mode is not being actively developed and should not be used
+ * outside of unit tests.</strong>
* <p>
- * A direct buffer is allocated for the file image. Writes are applied
- * to the buffer. The buffer tracks dirty slots regardless of the
- * transaction that wrote them and periodically writes dirty slots
- * through to disk. On commit, any dirty index or allocation nodes are
- * written onto the buffer and all dirty slots on the buffer. Dirty
- * slots in the buffer are then synchronously written to disk, the
- * appropriate root block is updated, and the file is (optionally)
- * flushed to disk.
+ * A direct buffer is allocated for the file image. Writes are applied to
+ * the buffer. The buffer tracks dirty slots regardless of the transaction
+ * that wrote them and periodically writes dirty slots through to disk. On
+ * commit, any dirty index or allocation nodes are written onto the buffer
+ * and all dirty slots on the buffer. Dirty slots in the buffer are then
+ * synchronously written to disk, the appropriate root block is updated, and
+ * the file is (optionally) flushed to disk.
* </p>
* <p>
* This option wires an image of the journal file into memory and
@@ -79,6 +80,9 @@
Direct(true/* stable */, true/* fullyBuffered */,StoreTypeEnum.WORM),
/**
+ * <strong>This mode is not being actively developed and should not be used
+ * outside of unit tests. Memory-mapped IO has the fatal weakness under Java
+ * that you cannot reliably close or extend the backing file.</strong>
* <p>
* A memory-mapped buffer is allocated for the file image. Writes are
* applied to the buffer. Reads read from the buffer. On commit, the map is
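[Reviewer note: the weakness called out above is that Java exposes no supported way to unmap a MappedByteBuffer -- the mapping is released only when the buffer is garbage collected, so closing, deleting, or extending the backing file is unreliable. A small standalone demonstration, assuming a scratch file (this is not bigdata code):

    import java.io.RandomAccessFile;
    import java.nio.MappedByteBuffer;
    import java.nio.channels.FileChannel;

    public class MappedClosePitfall {
        public static void main(final String[] args) throws Exception {
            final RandomAccessFile raf = new RandomAccessFile("journal.tmp", "rw");
            final FileChannel ch = raf.getChannel();
            final MappedByteBuffer buf =
                    ch.map(FileChannel.MapMode.READ_WRITE, 0, 1024);
            buf.put(0, (byte) 1); // write through the mapping
            ch.close(); // closes the channel, but the mapping stays live
            // No supported API unmaps 'buf'; until it is garbage collected,
            // deleting or truncating journal.tmp may fail (notably on
            // Windows), and the file cannot be remapped at a larger size
            // deterministically.
        }
    }
]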
Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/util/config/NicUtil.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/util/config/NicUtil.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/util/config/NicUtil.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -26,28 +26,20 @@
package com.bigdata.util.config;
import java.io.IOException;
-import java.net.InetAddress;
import java.net.Inet4Address;
+import java.net.InetAddress;
import java.net.InterfaceAddress;
-import java.net.MalformedURLException;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;
+import java.util.Collections;
+import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Enumeration;
-import java.util.Collections;
-import java.util.logging.LogRecord;
import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import net.jini.config.Configuration;
-import net.jini.config.ConfigurationException;
-import com.sun.jini.config.Config;
-import com.sun.jini.logging.Levels;
-
/**
* Utility class that provides a set of static convenience methods
* related to processing information about the current node's Network
@@ -400,34 +392,34 @@
return macAddr;
}
- /**
- * Three-argument version of <code>getInetAddress</code> that retrieves
- * the desired interface name from the given <code>Configuration</code>
- * parameter.
- */
- public static InetAddress getInetAddress(Configuration config,
- String componentName,
- String nicNameEntry)
- {
- String nicName = "NoNetworkInterfaceName";
- try {
- nicName = (String)Config.getNonNullEntry(config,
- componentName,
- nicNameEntry,
- String.class,
- "eth0");
- } catch(ConfigurationException e) {
- jiniConfigLogger.log(WARNING, e
- +" - [componentName="+componentName
- +", nicNameEntry="+nicNameEntry+"]");
- utilLogger.log(Level.WARN, e
- +" - [componentName="+componentName
- +", nicNameEntry="+nicNameEntry+"]");
- e.printStackTrace();
- return null;
- }
- return ( getInetAddress(nicName, 0, null, false) );
- }
+// /**
+// * Three-argument version of <code>getInetAddress</code> that retrieves
+// * the desired interface name from the given <code>Configuration</code>
+// * parameter.
+// */
+// public static InetAddress getInetAddress(Configuration config,
+// String componentName,
+// String nicNameEntry)
+// {
+// String nicName = "NoNetworkInterfaceName";
+// try {
+// nicName = (String)Config.getNonNullEntry(config,
+// componentName,
+// nicNameEntry,
+// String.class,
+// "eth0");
+// } catch(ConfigurationException e) {
+// jiniConfigLogger.log(WARNING, e
+// +" - [componentName="+componentName
+// +", nicNameEntry="+nicNameEntry+"]");
+// utilLogger.log(Level.WARN, e
+// +" - [componentName="+componentName
+// +", nicNameEntry="+nicNameEntry+"]");
+// e.printStackTrace();
+// return null;
+// }
+// return ( getInetAddress(nicName, 0, null, false) );
+// }
// What follows are a number of versions of the getIpAddress method
// provided for convenience.
Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/cache/TestRingBuffer.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/cache/TestRingBuffer.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/cache/TestRingBuffer.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -425,8 +425,8 @@
try {
buffer.add(null);
- fail("Expecting: " + IllegalArgumentException.class);
- } catch (IllegalArgumentException ex) {
+ fail("Expecting: " + NullPointerException.class);
+ } catch (NullPointerException ex) {
if (log.isInfoEnabled())
log.info("Ignoring expected exception: " + ex);
}
@@ -438,8 +438,8 @@
try {
buffer.offer(null);
- fail("Expecting: " + IllegalArgumentException.class);
- } catch (IllegalArgumentException ex) {
+ fail("Expecting: " + NullPointerException.class);
+ } catch (NullPointerException ex) {
if (log.isInfoEnabled())
log.info("Ignoring expected exception: " + ex);
}
Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -93,7 +93,25 @@
suite.addTest( TestTransientJournal.suite() );
- suite.addTest( TestDirectJournal.suite() );
+ /*
+ * Commented out since this mode is not used and there is an occasional
+ * test failure in:
+ *
+ * com.bigdata.journal.TestConcurrentJournal.test_concurrentReadersAreOk
+ *
+ * This error is stochastic and appears to be restricted to
+ * BufferMode#Direct. This is a journal mode backed by a fixed-capacity
+ * native ByteBuffer serving as a write-through cache to the disk. Since
+ * the buffer cannot be extended, that journal mode is not being
+ * exercised by anything. If you like, I can deprecate the Direct
+ * BufferMode and disable its test suite. (There is also a "Mapped"
+ * BufferMode whose tests we are not running due to problems with Java
+ * releasing native heap ByteBuffers and closing memory mapped files.
+ * Its use is strongly discouraged in the javadoc, but it has not been
+ * excised from the code since it might be appropriate for some
+ * applications.)
+ */
+// suite.addTest( TestDirectJournal.suite() );
/*
* Note: The mapped journal is somewhat problematic and its tests are
Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransactionService.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransactionService.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransactionService.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -40,6 +40,7 @@
import com.bigdata.service.AbstractTransactionService;
import com.bigdata.service.CommitTimeIndex;
import com.bigdata.service.TxServiceRunState;
+import com.bigdata.util.MillisecondTimestampFactory;
/**
* Unit tests of the {@link AbstractTransactionService} using a mock client.
@@ -259,6 +260,24 @@
}
+ /**
+ * FIXME This currently waits until at least two milliseconds have
+ * elapsed. This is a workaround for
+ * {@link TestTransactionService#test_newTx_readOnly()} until <a href=
+ * "https://sourceforge.net/apps/trac/bigdata/ticket/145" >ISSUE#145
+ * </a> is resolved. This override of {@link #nextTimestamp()} should
+ * be removed once that issue is fixed.
+ */
+ @Override
+ public long nextTimestamp() {
+
+ // skip at least one millisecond.
+ MillisecondTimestampFactory.nextMillis();
+
+ return MillisecondTimestampFactory.nextMillis();
+
+ }
+
}
/**
@@ -596,17 +615,25 @@
* GT the lastCommitTime since that could allow data not yet committed to
* become visible during the transaction (breaking isolation).
* <p>
- * A commitTime is identified by looking up the callers timestamp in a log of
- * the historical commit times and returning the first historical commit
+ * A commitTime is identified by looking up the caller's timestamp in a log
+ * of the historical commit times and returning the first historical commit
time LTE the caller's timestamp.
* <p>
* The transaction start time is then chosen from the half-open interval
* <i>commitTime</i> (inclusive lower bound) : <i>nextCommitTime</i>
* (exclusive upper bound).
*
- * @throws IOException
+ * @throws IOException
*
- * @todo This test fails occasionally. I have not figured out why yet. BBT
+ * @todo This test fails occasionally. This occurs if the timestamps
+ * assigned by the {@link MockTransactionService} are only 1 unit
+ * apart. When that happens, there are not enough distinct values
+ * available to allow 2 concurrent read-only transactions. See <a
+ * href=
+ * "https://sourceforge.net/apps/trac/bigdata/ticket/145">ISSUE#145
+ * </a>. Also see {@link MockTransactionService#nextTimestamp()}
+ * which has been overridden to guarantee that there are at least
+ * two distinct values such that this test will pass.
*/
public void test_newTx_readOnly() throws IOException {
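[Reviewer note: the interval logic the javadoc describes can be read as a floor lookup against the commit-time log followed by a pick from the half-open interval [commitTime, nextCommitTime). A sketch of that selection, assuming a simple sorted set stands in for the log -- this is not bigdata's actual implementation:

    import java.util.TreeSet;

    class StartTimeSketch {
        private final TreeSet<Long> commitTimes = new TreeSet<Long>();

        long chooseStartTime(final long callersTimestamp) {
            // First historical commit time LTE the caller's timestamp.
            final Long commitTime = commitTimes.floor(callersTimestamp);
            if (commitTime == null)
                throw new IllegalStateException("no commit point");
            final Long nextCommitTime = commitTimes.higher(commitTime);
            // Any value in [commitTime, nextCommitTime) preserves isolation;
            // nextCommitTime == null means no later commit point exists yet.
            // When timestamps are only one unit apart the interval holds a
            // single value, which is why two concurrent read-only
            // transactions need at least two distinct timestamps (ISSUE#145).
            return commitTime;
        }
    }
]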
Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -194,20 +194,34 @@
*
* @throws InterruptedException
* @throws ExecutionException
+ *
+ * @todo This test now logs a warning rather than failing pending resolution
+ * of https://sourceforge.net/apps/trac/bigdata/ticket/147
*/
public void test_stress_startWriteStop2() throws InterruptedException,
ExecutionException {
- for (int i = 0; i < 10000; i++) {
+ final int LIMIT = 10000;
+ int nerr = 0;
+ for (int i = 0; i < LIMIT; i++) {
try {
doStartWriteStop2Test();
} catch (Throwable t) {
- fail("Pass#=" + i, t);
+ // fail("Pass#=" + i, t);
+ log.warn("Would have failed: pass#=" + i + ", cause=" + t);
+ nerr++;
}
}
+ if (nerr > 0) {
+
+ log.error("Test would have failed: nerrs=" + nerr + " out of "
+ + LIMIT + " trials");
+
+ }
+
}
/**
Modified: branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -34,7 +34,6 @@
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
-import java.net.InetAddress;
import java.util.Arrays;
import java.util.Date;
import java.util.Enumeration;
@@ -131,8 +130,6 @@
public final Properties properties;
public final String[] jiniOptions;
- private final String serviceIpAddr;
-
protected void toString(StringBuilder sb) {
super.toString(sb);
@@ -178,12 +175,6 @@
} else {
log.warn("groups = " + Arrays.toString(this.groups));
}
-
- try {
- this.serviceIpAddr = NicUtil.getIpAddress("default.nic", "default", false);
- } catch(IOException e) {
- throw new ConfigurationException(e.getMessage(), e);
- }
}
/**
@@ -480,6 +471,9 @@
final ServiceDir serviceDir = new ServiceDir(this.serviceDir);
+ String serviceIpAddr = NicUtil.getIpAddress ( "default.nic", "default", false ) ;
+ if ( null == serviceIpAddr )
+ throw new IOException ( "Can't get a host ip address" ) ;
final Hostname hostName = new Hostname(serviceIpAddr);
final ServiceUUID serviceUUID = new ServiceUUID(this.serviceUUID);
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -272,7 +272,18 @@
//
// /** {@value #DEFAULT_MAX_TRIES} */
// int DEFAULT_MAX_TRIES = 3;
-
+
+ /**
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
+ */
+ String DEFAULT_GRAPH = "defaultGraph" ;
+
+ /**
+ * TODO Should we always enforce a real value, i.e. provide a real
+ * default or abort the load?
+ */
+ String DEFAULT_DEFAULT_GRAPH = null ;
}
/**
@@ -402,6 +413,12 @@
private transient RDFFormat rdfFormat;
/**
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
+ */
+ public final String defaultGraph ;
+
+ /**
* Force the load of the NxParser integration class and its registration
* of the NQuadsParser#nquads RDFFormat.
*
@@ -496,6 +513,8 @@
sb.append(", " + ConfigurationOptions.RDF_FORMAT + "=" + rdfFormat);
+ sb.append(", " + ConfigurationOptions.DEFAULT_GRAPH + "=" + defaultGraph) ;
+
sb.append(", " + ConfigurationOptions.FORCE_OVERFLOW_BEFORE_CLOSURE + "="
+ forceOverflowBeforeClosure);
@@ -601,6 +620,10 @@
}
+ defaultGraph = (String) config.getEntry(component,
+ ConfigurationOptions.DEFAULT_GRAPH, String.class,
+ ConfigurationOptions.DEFAULT_DEFAULT_GRAPH);
+
rejectedExecutionDelay = (Long) config.getEntry(
component,
ConfigurationOptions.REJECTED_EXECUTION_DELAY, Long.TYPE,
@@ -979,6 +1002,7 @@
jobState.ontology,//file
jobState.ontology.getPath(),//baseURI
jobState.getRDFFormat(),//
+ jobState.defaultGraph,
jobState.ontologyFileFilter //
);
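[Reviewer note: on the deployment side, the new entry is picked up from the Jini configuration via the config.getEntry() call shown above. A hypothetical configuration block -- the component name and URI here are illustrative, not taken from this commit:

    com.bigdata.rdf.load.MappedRDFDataLoadMaster {
        defaultGraph = "http://example.org/defaultGraph";
    }
]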
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -223,6 +223,7 @@
jobState.valuesInitialCapacity,//
jobState.bnodesInitialCapacity,//
jobState.getRDFFormat(), //
+ jobState.defaultGraph,
parserOptions,//
false, // deleteAfter is handled by the master!
jobState.parserPoolSize, //
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -186,7 +186,7 @@
// run the parser.
// @todo reuse the same underlying parser instance?
- loader.loadRdf(reader, baseURL, rdfFormat, parserOptions);
+ loader.loadRdf(reader, baseURL, rdfFormat, null, parserOptions);
success = true;
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -356,8 +356,14 @@
* The default {@link RDFFormat}.
*/
private final RDFFormat defaultFormat;
-
+
/**
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
+ */
+ private final String defaultGraph;
+
+ /**
* Options for the {@link RDFParser}.
*/
private final RDFParserOptions parserOptions;
@@ -1423,7 +1429,7 @@
try {
// run the parser.
new PresortRioLoader(buffer).loadRdf(reader, baseURL,
- rdfFormat, parserOptions);
+ rdfFormat, defaultGraph, parserOptions);
} finally {
reader.close();
}
@@ -1490,6 +1496,9 @@
* {@link BNode}s parsed from a single document.
* @param defaultFormat
* The default {@link RDFFormat} which will be assumed.
+ * @param defaultGraph
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
* @param parserOptions
* Options for the {@link RDFParser}.
* @param deleteAfter
@@ -1529,6 +1538,7 @@
final int valuesInitialCapacity,//
final int bnodesInitialCapacity, //
final RDFFormat defaultFormat,//
+ final String defaultGraph,//
final RDFParserOptions parserOptions,//
final boolean deleteAfter,//
final int parserPoolSize,//
@@ -1566,6 +1576,8 @@
this.defaultFormat = defaultFormat;
+ this.defaultGraph = defaultGraph;
+
this.parserOptions = parserOptions;
this.deleteAfter = deleteAfter;
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -37,6 +37,8 @@
import org.openrdf.rio.RDFParser;
import org.openrdf.rio.Rio;
+import com.bigdata.rdf.model.BigdataURI;
+
/**
* Parses data but does not load it into the indices.
*
@@ -74,6 +76,8 @@
private final ValueFactory valueFactory;
+ protected String defaultGraph;
+
public BasicRioLoader(final ValueFactory valueFactory) {
if (valueFactory == null)
@@ -153,18 +157,20 @@
}
final public void loadRdf(final InputStream is, final String baseURI,
- final RDFFormat rdfFormat, final RDFParserOptions options)
+ final RDFFormat rdfFormat, final String defaultGraph,
+ final RDFParserOptions options)
throws Exception {
- loadRdf2(is, baseURI, rdfFormat, options);
+ loadRdf2(is, baseURI, rdfFormat, defaultGraph, options);
}
final public void loadRdf(final Reader reader, final String baseURI,
- final RDFFormat rdfFormat, final RDFParserOptions options)
+ final RDFFormat rdfFormat, final String defaultGraph,
+ final RDFParserOptions options)
throws Exception {
- loadRdf2(reader, baseURI, rdfFormat, options);
+ loadRdf2(reader, baseURI, rdfFormat, defaultGraph, options);
}
@@ -180,7 +186,7 @@
* @throws Exception
*/
protected void loadRdf2(final Object source, final String baseURI,
- final RDFFormat rdfFormat, final RDFParserOptions options)
+ final RDFFormat rdfFormat, final String defaultGraph, final RDFParserOptions options)
throws Exception {
if (source == null)
@@ -198,6 +204,8 @@
if (log.isInfoEnabled())
log.info("format=" + rdfFormat + ", options=" + options);
+ this.defaultGraph = defaultGraph ;
+
final RDFParser parser = getParser(rdfFormat);
// apply options to the parser
@@ -212,7 +220,7 @@
// Note: reset so that rates are correct for each source loaded.
stmtsAdded = 0;
-
+
try {
before();
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -72,12 +72,14 @@
* The base URL for those data.
* @param rdfFormat
* The interchange format.
+ * @param defaultGraph
+ * The default graph.
* @param options
* Options to be applied to the {@link RDFParser}.
* @throws Exception
*/
public void loadRdf(Reader reader, String baseURL, RDFFormat rdfFormat,
- RDFParserOptions options) throws Exception;
+ String defaultGraph, RDFParserOptions options) throws Exception;
/**
* Parse RDF data.
@@ -88,11 +90,13 @@
* The base URL for those data.
* @param rdfFormat
* The interchange format.
+ * @param defaultGraph
+ * The default graph.
* @param options
* Options to be applied to the {@link RDFParser}.
* @throws Exception
*/
public void loadRdf(InputStream is, String baseURI, RDFFormat rdfFormat,
- RDFParserOptions options) throws Exception;
+ String defaultGraph, RDFParserOptions options) throws Exception;
}
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -23,11 +23,14 @@
*/
package com.bigdata.rdf.rio;
+import org.openrdf.model.Resource;
import org.openrdf.model.Statement;
import org.openrdf.model.Value;
import org.openrdf.rio.RDFHandler;
import org.openrdf.rio.RDFHandlerException;
+import com.bigdata.rdf.model.BigdataURI;
+
/**
* Statement handler for the RIO RDF Parser that writes on a
* {@link StatementBuffer}.
@@ -45,6 +48,12 @@
final protected IStatementBuffer<?> buffer;
/**
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
+ */
+ private BigdataURI defaultGraphURI = null ;
+
+ /**
* Sets up parser to load RDF.
*
* @param buffer
@@ -58,7 +67,7 @@
this.buffer = buffer;
}
-
+
/**
* bulk insert the buffered data into the store.
*/
@@ -87,8 +96,11 @@
public RDFHandler newRDFHandler() {
+ defaultGraphURI = null != defaultGraph && 4 == buffer.getDatabase ().getSPOKeyArity ()
+ ? buffer.getDatabase ().getValueFactory ().createURI ( defaultGraph )
+ : null
+ ;
return this;
-
}
public void handleStatement( final Statement stmt ) {
@@ -98,9 +110,13 @@
log.debug(stmt);
}
-
+
+ Resource graph = stmt.getContext() ;
+ if ( null == graph
+ && null != defaultGraphURI ) // only true when we know we are loading a quad store
+ graph = defaultGraphURI ;
// buffer the write (handles overflow).
- buffer.add( stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), stmt.getContext() );
+ buffer.add( stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), graph );
stmtsAdded++;
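[Reviewer note: distilled from the handler above, the fallback rule is that an explicit statement context always wins, and the default graph is substituted only when the context is null and the target is a quad store (defaultGraphURI resolves to null for triple stores). A simplified restatement with a type parameter standing in for the OpenRDF Resource class:

    class GraphFallbackSketch {
        // Simplified restatement of the graph fallback in handleStatement();
        // R stands in for org.openrdf.model.Resource.
        static <R> R resolveGraph(final R stmtContext, final R defaultGraphURI) {
            // defaultGraphURI is non-null only when a default graph was
            // configured AND the target database is a quad store (arity 4).
            return stmtContext != null ? stmtContext : defaultGraphURI;
        }
    }
]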
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -556,11 +556,18 @@
final int p = this.p.hashCode();
final int o = this.o.hashCode();
-
- // Note: historical behavior was (s,p,o) based hash.
- hashCode = 961 * ((int) (s ^ (s >>> 32))) + 31
- * ((int) (p ^ (p >>> 32))) + ((int) (o ^ (o >>> 32)));
+ /*
+ * Note: The historical behavior was based on the int64 term
+ * identifiers. Since the hash code is now computed from the int32
+ * hash codes of the (s,p,o) IV objects, the original bit math was
+ * resulting in a hash code which was always zero (for int operands
+ * Java masks the shift distance to five bits, so s >>> 32 == s and
+ * s ^ (s >>> 32) == 0).
+ */
+ hashCode = 961 * s + 31 * p + o;
+// hashCode = 961 * ((int) (s ^ (s >>> 32))) + 31
+// * ((int) (p ^ (p >>> 32))) + ((int) (o ^ (o >>> 32)));
+
}
return hashCode;
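[Reviewer note: the zero-hash pitfall fixed above is easy to verify -- for int operands Java masks the shift distance to five bits, so s >>> 32 is a no-op and the XOR term vanishes. A quick standalone check (not bigdata code):

    public class ShiftDemo {
        public static void main(final String[] args) {
            final int s = 0xCAFEBABE, p = 31, o = 7;
            System.out.println((s >>> 32) == s);       // true: shift masked to 0
            System.out.println((s ^ (s >>> 32)) == 0); // true: old term was 0
            // The replacement mixes the int hash codes directly:
            System.out.println(961 * s + 31 * p + o);  // non-degenerate hash
        }
    }
]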
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -640,7 +640,7 @@
final LoadStats totals = new LoadStats();
- loadData3(totals, reader, baseURL, rdfFormat, true/*endOfBatch*/);
+ loadData3(totals, reader, baseURL, rdfFormat, null, true/*endOfBatch*/);
return totals;
@@ -668,7 +668,7 @@
final LoadStats totals = new LoadStats();
- loadData3(totals, is, baseURL, rdfFormat, true/* endOfBatch */);
+ loadData3(totals, is, baseURL, rdfFormat, null, true/* endOfBatch */);
return totals;
@@ -704,7 +704,7 @@
final LoadStats totals = new LoadStats();
- loadData3(totals, is, baseURL, rdfFormat, true/*endOfBatch*/);
+ loadData3(totals, is, baseURL, rdfFormat, null, true/*endOfBatch*/);
return totals;
@@ -752,7 +752,7 @@
if(file.exists()) {
loadFiles(totals, 0/* depth */, file, baseURL,
- rdfFormat, filter, endOfBatch);
+ rdfFormat, null, filter, endOfBatch);
return;
@@ -778,7 +778,7 @@
try {
- loadData3(totals, reader, baseURL, rdfFormat, endOfBatch);
+ loadData3(totals, reader, baseURL, rdfFormat, null, endOfBatch);
} catch (Exception ex) {
@@ -806,6 +806,9 @@
* The format of the file (optional, when not specified the
* format is deduced for each file in turn using the
* {@link RDFFormat} static methods).
+ * @param defaultGraph
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
* @param filter
* A filter selecting the file names that will be loaded
* (optional). When specified, the filter MUST accept directories
@@ -816,7 +819,8 @@
* @throws IOException
*/
public LoadStats loadFiles(final File file, final String baseURI,
- final RDFFormat rdfFormat, final FilenameFilter filter)
+ final RDFFormat rdfFormat, final String defaultGraph,
+ final FilenameFilter filter)
throws IOException {
if (file == null)
@@ -824,7 +828,7 @@
final LoadStats totals = new LoadStats();
- loadFiles(totals, 0/* depth */, file, baseURI, rdfFormat, filter, true/* endOfBatch */
+ loadFiles(totals, 0/* depth */, file, baseURI, rdfFormat, defaultGraph, filter, true/* endOfBatch */
);
return totals;
@@ -833,7 +837,8 @@
protected void loadFiles(final LoadStats totals, final int depth,
final File file, final String baseURI, final RDFFormat rdfFormat,
- final FilenameFilter filter, final boolean endOfBatch)
+ final String defaultGraph, final FilenameFilter filter,
+ final boolean endOfBatch)
throws IOException {
if (file.isDirectory()) {
@@ -853,7 +858,7 @@
// final RDFFormat fmt = RDFFormat.forFileName(f.toString(),
// rdfFormat);
- loadFiles(totals, depth + 1, f, baseURI, rdfFormat, filter,
+ loadFiles(totals, depth + 1, f, baseURI, rdfFormat, defaultGraph, filter,
(depth == 0 && i < files.length ? false : endOfBatch));
}
@@ -908,7 +913,7 @@
final String s = baseURI != null ? baseURI : file.toURI()
.toString();
- loadData3(totals, reader, s, fmt, endOfBatch);
+ loadData3(totals, reader, s, fmt, defaultGraph, endOfBatch);
return;
@@ -944,7 +949,7 @@
*/
protected void loadData3(final LoadStats totals, final Object source,
final String baseURL, final RDFFormat rdfFormat,
- final boolean endOfBatch) throws IOException {
+ final String defaultGraph, final boolean endOfBatch) throws IOException {
final long begin = System.currentTimeMillis();
@@ -967,11 +972,10 @@
}
// Setup the loader.
- final PresortRioLoader loader = new PresortRioLoader(buffer);
+ final PresortRioLoader loader = new PresortRioLoader ( buffer ) ;
// @todo review: disable auto-flush - caller will handle flush of the buffer.
// loader.setFlush(false);
-
// add listener to log progress.
loader.addRioLoaderListener( new RioLoaderListener() {
@@ -995,12 +999,12 @@
if(source instanceof Reader) {
- loader.loadRdf((Reader) source, baseURL, rdfFormat, parserOptions);
+ loader.loadRdf((Reader) source, baseURL, rdfFormat, defaultGraph, parserOptions);
} else if (source instanceof InputStream) {
loader.loadRdf((InputStream) source, baseURL, rdfFormat,
- parserOptions);
+ defaultGraph, parserOptions);
} else
throw new AssertionError();
@@ -1356,7 +1360,7 @@
// rdfFormat, filter);
dataLoader.loadFiles(totals, 0/* depth */, fileOrDir, baseURI,
- rdfFormat, filter, true/* endOfBatch */
+ rdfFormat, null, filter, true/* endOfBatch */
);
}
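[Reviewer note: with the widened signature, a caller supplies the default graph alongside the format and filter. A hypothetical invocation matching the new loadFiles() parameters -- the path and graph URI are illustrative only:

    // Hypothetical caller of the widened loadFiles() signature.
    void loadDirectory(final DataLoader dataLoader) throws java.io.IOException {
        final LoadStats stats = dataLoader.loadFiles(
                new java.io.File("/data/triples"),   // file or directory
                null,                                // baseURI: derived per file
                org.openrdf.rio.RDFFormat.NTRIPLES,  // interchange format
                "http://example.org/defaultGraph",   // defaultGraph (quads only)
                null);                               // FilenameFilter (optional)
        // stats now reflects the told triples loaded across the directory.
    }
]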
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -714,7 +714,7 @@
try {
// run the parser.
new MyLoader(buffer).loadRdf(reader, baseURL,
- defaultRDFFormat, s.parserOptions);
+ defaultRDFFormat, null, s.parserOptions);
} finally {
reader.close();
}
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -401,7 +401,7 @@
});
- loader.loadRdf((Reader) reader, baseURI, rdfFormat, options);
+ loader.loadRdf((Reader) reader, baseURI, rdfFormat, null, options);
if (log.isInfoEnabled())
log.info("Done: " + resource);
@@ -681,7 +681,7 @@
loader.loadRdf(new BufferedReader(new InputStreamReader(
new FileInputStream(resource))), baseURI, rdfFormat,
- options);
+ null, options);
if(log.isInfoEnabled())
log.info("End of reparse: nerrors=" + nerrs + ", file="
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -161,6 +161,7 @@
valuesInitialCapacity,//
bnodesInitialCapacity,//
RDFFormat.RDFXML, // defaultFormat
+ null, // defaultGraph
parserOptions, // parserOptions
false, // deleteAfter
poolSize, // parserPoolSize,
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -400,6 +400,7 @@
valuesInitialCapacity,//
bnodesInitialCapacity,//
RDFFormat.RDFXML, // defaultFormat
+ null, // defaultGraph
parserOptions, //
false, // deleteAfter
parallel?5:1, // parserPoolSize,
Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestOptionals.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestOptionals.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestOptionals.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -77,16 +77,16 @@
super(name);
}
- public void test_optionals_nextedSubquery()
- {
-
- final Properties p = new Properties(getProperties());
-
- p.setProperty(AbstractRelation.Options.NESTED_SUBQUERY, "true");
-
- doOptionalsTest(p);
-
- }
+// public void test_optionals_nextedSubquery()
+// {
+//
+// final Properties p = new Properties(getProperties());
+//
+// p.setProperty(AbstractRelation.Options.NESTED_SUBQUERY, "true");
+//
+// doOptionalsTest(p);
+//
+// }
public void test_optionals_pipeline()
{
Modified: branches/JOURNAL_HA_BRANCH/bigdata-sails/src/samples/com/bigdata/samples/fastload.properties
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-sails/src/samples/com/bigdata/samples/fastload.properties 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-sails/src/samples/com/bigdata/samples/fastload.properties 2010-09-28 12:14:00 UTC (rev 3655)
@@ -1,7 +1,8 @@
-# Be very careful when you use this configuration! This turns off incremental
-# inference for load and retract, so you must explicitly force these operations,
-# which requires punching through the SAIL layer. Of course, if you are not
-# using inference then this is just the ticket and quite fast.
+# This configuration turns off incremental inference for load and retract, so
+# you must explicitly force these operations if you want to compute the closure
+# of the knowledge base. Forcing the closure requires punching through the SAIL
+# layer. Of course, if you are not using inference then this configuration is
+# just the ticket and is quite fast.
# set the initial and maximum extent of the journal
com.bigdata.journal.AbstractJournal.initialExtent=209715200
Modified: branches/JOURNAL_HA_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java 2010-09-28 12:14:00 UTC (rev 3655)
@@ -1204,7 +1204,7 @@
try {
dataLoader.loadFiles(dataDir, null/* baseURI */,
- null/* rdfFormat */, filter);
+ null/* rdfFormat */, null/* defaultGraph */, filter);
} catch (IOException ex) {
Modified: branches/JOURNAL_HA_BRANCH/build.xml
===================================================================
--- branches/JOURNAL_HA_BRANCH/build.xml 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/build.xml 2010-09-28 12:14:00 UTC (rev 3655)
@@ -2002,6 +2002,10 @@
<fileset dir="${bigdata.dir}/bigdata/lib">
<include name="**/*.jar" />
</fileset>
+ <fileset dir="${bigdata.dir}/bigdata-jini/lib/jini/lib">
+ <include name="jini-core.jar" />
+ <include name="jini-ext.jar" />
+ </fileset>
</copy>
<!-- copy resources to Workbench webapp. -->
Modified: branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataCluster.config
===================================================================
--- branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataCluster.config 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataCluster.config 2010-09-28 12:14:00 UTC (rev 3655)
@@ -758,10 +758,11 @@
* have for your applications!
*/
"-Xmx1600m",// was 800
- /* Optionally, grab all/most of the max heap at once. This makes sense for
- * DS but is less necessary for other bigdata services.
+ /* Pre-allocation of the DS heap is no longer recommended.
+ *
+ * See https://sourceforge.net/apps/trac/bigdata/ticket/157
+ "-Xms800m",
*/
- "-Xms800m", // 1/2 of the max heap is a good value.
/*
* This option will keep the JVM "alive" even when it is memory starved
* but performance of a memory-starved JVM is terrible.
Modified: branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataCluster16.config
===================================================================
--- branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataCluster16.config 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataCluster16.config 2010-09-28 12:14:00 UTC (rev 3655)
@@ -813,12 +813,11 @@
* http://blogs.msdn.com/ntdebugging/archive/2009/02/06/microsoft-windows-dynamic-cache-service.aspx
*/
"-Xmx9G", // Note: out of 32 available!
- /* Optionally, grab all/most of the max heap at once. This makes sense for
- * DS, but is less necessary for other bigdata services. If the machine is
- * dedicated to the DataService then use the maximum heap. Otherwise 1/2 of
- * the maximum heap is a good value.
- */
+ /* Pre-allocation of the DS heap is no longer recommended.
+ *
+ * See https://sourceforge.net/apps/trac/bigdata/ticket/157
"-Xms9G",
+ */
/*
* FIXME This might not be required, so that should be tested.
* However, you don't want the JVM to just die if it is being
@@ -1298,11 +1297,11 @@
static private namespace = "U"+univNum+"";
// minimum #of data services to run.
- static private minDataServices = bigdata.dataServiceCount;
+// static private minDataServices = bigdata.dataServiceCount; // unused
// How long the master will wait to discover the minimum #of data
// services that you specified (ms).
- static private awaitDataServicesTimeout = 8000;
+// static private awaitDataServicesTimeout = 8000; // unused.
/* Multiplier for the scatter effect.
*/
Modified: branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataStandalone.config
===================================================================
--- branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataStandalone.config 2010-09-28 11:50:52 UTC (rev 3654)
+++ branches/JOURNAL_HA_BRANCH/src/resources/config/bigdataStandalone.config 2010-09-28 12:14:00 UTC (rev 3655)
@@ -781,10 +781,11 @@
* have for your applications!
*/
"-Xmx4g",// was 800
- /* Optionally, grab all/most of the max heap at once. This makes sense for
- * DS but is less necessary for other bigdata services.
+ /* Pre-allocation of the DS heap is no longer recommended.
+ *
+ * See https://sourceforge.net/apps/trac/bigdata/ticket/157
+ "-Xms2G",
*/
- "-Xms2G", // 1/2 of the max heap is a good value.
/*
* This option will keep the JVM "alive" even when it is memory starved
* but performance of a memory-starved JVM is terrible.