From: <tho...@us...> - 2012-09-27 12:16:42
|
Revision: 6632 http://bigdata.svn.sourceforge.net/bigdata/?rev=6632&view=rev Author: thompsonbry Date: 2012-09-27 12:16:36 +0000 (Thu, 27 Sep 2012) Log Message: ----------- I have committed a change to AccessPath and SPOArrayIterator as a workaround for this issue. It raises the limit to 10,000,000 in both classes. The javadoc has been annotated to link to this issue. There may well be other limits that will impact truth maintenance. This all needs to be reviewed in depth. @see https://sourceforge.net/apps/trac/bigdata/ticket/606 (Array limits in truth maintenance code). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOArrayIterator.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java 2012-09-26 19:03:29 UTC (rev 6631) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java 2012-09-27 12:16:36 UTC (rev 6632) @@ -188,14 +188,17 @@ } /** - * The maximum <em>limit</em> that is allowed for a fully-buffered read. - * The {@link #asynchronousIterator(Iterator)} will always be used above - * this limit. + * The maximum <em>limit</em> that is allowed for a fully-buffered read. The + * {@link #asynchronousIterator(Iterator)} will always be used above this + * limit. * - * @todo This should probably be close to the branching factor or chunk - * capacity. + * FIXME Array limits in truth maintenance code. This should probably be + * close to the branching factor or chunk capacity. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/606"> + * Array limits in truth maintenance code.
</a> */ - protected static final int MAX_FULLY_BUFFERED_READ_LIMIT = 250000; + protected static final int MAX_FULLY_BUFFERED_READ_LIMIT = 10000000; /** * We cache some stuff for historical reads. Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOArrayIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOArrayIterator.java 2012-09-26 19:03:29 UTC (rev 6631) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOArrayIterator.java 2012-09-27 12:16:36 UTC (rev 6632) @@ -30,7 +30,6 @@ import java.util.Arrays; import java.util.NoSuchElementException; -import com.bigdata.btree.ITupleIterator; import com.bigdata.rdf.inf.TruthMaintenance; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.relation.accesspath.IAccessPath; @@ -52,6 +51,17 @@ */ public class SPOArrayIterator implements IChunkedOrderedIterator<ISPO> { + /** + * The maximum capacity for the backing array. + * + * FIXME Array limits in truth maintenance code. This should probably be + * close to the branching factor or chunk capacity. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/606"> + * Array limits in truth maintenance code. </a> + */ + private static final long MAX_CAPACITY = 10000000; + private boolean open = true; /** @@ -197,14 +207,16 @@ final long rangeCount = accessPath.rangeCount(false/*exact*/); - if (rangeCount > 10000000) { + if (rangeCount >= MAX_CAPACITY) { /* * Note: This is a relatively high limit (10M statements). You are * much better off processing smaller chunks!
*/ - throw new RuntimeException("Too many statements to read into memory: "+rangeCount); + throw new RuntimeException( + "Too many statements to read into memory: rangeCount=" + + rangeCount + ", maxCapacity=" + MAX_CAPACITY); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |