This list is closed, nobody may subscribe to it.
2010 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
(139) |
Aug
(94) |
Sep
(232) |
Oct
(143) |
Nov
(138) |
Dec
(55) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
2011 |
Jan
(127) |
Feb
(90) |
Mar
(101) |
Apr
(74) |
May
(148) |
Jun
(241) |
Jul
(169) |
Aug
(121) |
Sep
(157) |
Oct
(199) |
Nov
(281) |
Dec
(75) |
2012 |
Jan
(107) |
Feb
(122) |
Mar
(184) |
Apr
(73) |
May
(14) |
Jun
(49) |
Jul
(26) |
Aug
(103) |
Sep
(133) |
Oct
(61) |
Nov
(51) |
Dec
(55) |
2013 |
Jan
(59) |
Feb
(72) |
Mar
(99) |
Apr
(62) |
May
(92) |
Jun
(19) |
Jul
(31) |
Aug
(138) |
Sep
(47) |
Oct
(83) |
Nov
(95) |
Dec
(111) |
2014 |
Jan
(125) |
Feb
(60) |
Mar
(119) |
Apr
(136) |
May
(270) |
Jun
(83) |
Jul
(88) |
Aug
(30) |
Sep
(47) |
Oct
(27) |
Nov
(23) |
Dec
|
2015 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(3) |
Oct
|
Nov
|
Dec
|
2016 |
Jan
|
Feb
|
Mar
(4) |
Apr
(1) |
May
|
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
From: <tho...@us...> - 2014-02-20 12:13:09
|
Revision: 7857 http://sourceforge.net/p/bigdata/code/7857 Author: thompsonbry Date: 2014-02-20 12:13:07 +0000 (Thu, 20 Feb 2014) Log Message: ----------- Removed the use of some jetty classes. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/DescribeBindingsCollector.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/DescribeCacheUpdater.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/DescribeBindingsCollector.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/DescribeBindingsCollector.java 2014-02-20 12:02:54 UTC (rev 7856) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/DescribeBindingsCollector.java 2014-02-20 12:13:07 UTC (rev 7857) @@ -31,7 +31,6 @@ import java.util.Set; import org.apache.log4j.Logger; -import org.eclipse.jetty.util.ConcurrentHashSet; import org.openrdf.query.Binding; import org.openrdf.query.BindingSet; import org.openrdf.query.QueryEvaluationException; @@ -51,7 +50,7 @@ .getLogger(DescribeBindingsCollector.class); private final IVariable<?>[] originalVars; - private final ConcurrentHashSet<BigdataValue> describedResources; + private final Set<BigdataValue> describedResources; private final CloseableIteration<BindingSet, QueryEvaluationException> src; private boolean open = true; @@ -61,12 +60,13 @@ * The set of variables whose distinct bound values will be * reported. * @param describedResources - * The set of distinct bound values for those variables. + * The set of distinct bound values for those variables (a high + * concurrency, thread-safe set). * @param src * The source iterator. 
*/ public DescribeBindingsCollector(final Set<IVariable<?>> originalVars, - final ConcurrentHashSet<BigdataValue> describedResources, + final Set<BigdataValue> describedResources, final CloseableIteration<BindingSet, QueryEvaluationException> src) { if (originalVars == null) Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/DescribeCacheUpdater.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/DescribeCacheUpdater.java 2014-02-20 12:02:54 UTC (rev 7856) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/DescribeCacheUpdater.java 2014-02-20 12:13:07 UTC (rev 7857) @@ -4,9 +4,9 @@ import java.util.HashMap; import java.util.Map; +import java.util.Set; import org.apache.log4j.Logger; -import org.eclipse.jetty.util.ConcurrentHashSet; import org.openrdf.model.Graph; import org.openrdf.model.Resource; import org.openrdf.model.Value; @@ -45,12 +45,12 @@ * original DESCRIBE query. We will collect all statements having a * described resource as either a subject or an object. * <p> - * Note: This set is populated as the solutions are observed before they - * are fed into the {@link ASTConstructIterator}. It is a - * {@link ConcurrentHashSet} in order to ensure the visibility of the - * updates to this class. + * Note: This set is populated as the solutions are observed before they are + * fed into the {@link ASTConstructIterator}. It MUST be a thread-safe + * {@link Set} in order to ensure the visibility of the updates to this + * class. It should also support high concurrency. */ - private final ConcurrentHashSet<BigdataValue> describedResources; + private final Set<BigdataValue> describedResources; /** * The source iterator visiting the statements that are the description @@ -78,10 +78,11 @@ * @param cache * The cache to be updated. 
* @param describedResources - * The {@link BigdataValue}s that become bound for the - * projection of the original DESCRIBE query. We will collect - * all statements having a described resource as either a - * subject or an object. + * The {@link BigdataValue}s that become bound for the projection + * of the original DESCRIBE query. We will collect all statements + * having a described resource as either a subject or an object. + * This MUST be a thread-safe (and concurrency favorable) set in + * order to ensure the visibility of the updates. * @param src * The source iterator, visiting the statements that are the * description of the resource(s) identified in the @@ -89,7 +90,7 @@ */ public DescribeCacheUpdater( final IDescribeCache cache, - final ConcurrentHashSet<BigdataValue> describedResources, + final Set<BigdataValue> describedResources, final CloseableIteration<BigdataStatement, QueryEvaluationException> src) { if (cache == null) Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java 2014-02-20 12:02:54 UTC (rev 7856) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java 2014-02-20 12:13:07 UTC (rev 7857) @@ -36,10 +36,10 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentSkipListSet; import org.apache.log4j.Logger; import org.apache.log4j.MDC; -import org.eclipse.jetty.util.ConcurrentHashSet; import org.openrdf.model.Value; import org.openrdf.query.Binding; import org.openrdf.query.BindingSet; @@ -513,7 +513,7 @@ try { final CloseableIteration<BindingSet, QueryEvaluationException> solutions2; - final ConcurrentHashSet<BigdataValue> describedResources; + final Set<BigdataValue> describedResources; if (describeCache != 
null) { /** @@ -540,7 +540,7 @@ */ // Concurrency safe set. - describedResources = new ConcurrentHashSet<BigdataValue>(); + describedResources = new ConcurrentSkipListSet<BigdataValue>(); // Collect the bindings on those variables. solutions2 = new DescribeBindingsCollector(// This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-02-20 12:02:57
|
Revision: 7856 http://sourceforge.net/p/bigdata/code/7856 Author: thompsonbry Date: 2014-02-20 12:02:54 +0000 (Thu, 20 Feb 2014) Log Message: ----------- Removed junit import. This has also been removed in the RDR branch. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2014-02-20 00:08:03 UTC (rev 7855) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2014-02-20 12:02:54 UTC (rev 7856) @@ -40,8 +40,6 @@ import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; -import junit.framework.AssertionFailedError; - import org.apache.log4j.Logger; import com.bigdata.bop.BOp; @@ -1055,7 +1053,7 @@ // The cutoff limit. This annotation MUST exist on the JOIN. if (limit != ((Long) joinOp.getRequiredProperty(JoinAnnotations.LIMIT)) .intValue()) - throw new AssertionFailedError(); + throw new AssertionError(); final int joinId = joinOp.getId(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-02-20 00:08:07
|
Revision: 7855 http://sourceforge.net/p/bigdata/code/7855 Author: thompsonbry Date: 2014-02-20 00:08:03 +0000 (Thu, 20 Feb 2014) Log Message: ----------- Missed in the last commit. Modified Paths: -------------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java 2014-02-20 00:07:56 UTC (rev 7854) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java 2014-02-20 00:08:03 UTC (rev 7855) @@ -28,7 +28,7 @@ SailConnection cxn = null; try { cxn = getSail().getConnection(); - new SailGraphLoader(cxn).loadGraph(null/* fallback */, resources); + newSailGraphLoader(cxn).loadGraph(null/* fallback */, resources); cxn.commit(); ok = true; } finally { @@ -41,4 +41,10 @@ } + protected SailGraphLoader newSailGraphLoader(SailConnection cxn) { + + return new SailGraphLoader(cxn); + + } + } \ No newline at end of file This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-02-20 00:07:59
|
Revision: 7854 http://sourceforge.net/p/bigdata/code/7854 Author: thompsonbry Date: 2014-02-20 00:07:56 +0000 (Thu, 20 Feb 2014) Log Message: ----------- Exposed the ValueFactory to the GraphLoader abstract to support RDR, which needs to be able to create statements about statements using a bigdata custom value factory. Added a weighted SSSP test case. This test fails. It will pass once I modify the test and the Bigdata GAS Engine implementation to support the RDR access paths. @See #526 (RDR) Modified Paths: -------------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphLoader.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GraphLoader.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/SailGraphLoader.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASRunner.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGraphFixture.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/AbstractBigdataGraphTestCase.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestSSSP.java Added Paths: ----------- branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallWeightedGraph.ttl branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/weightedSmallGraph.png Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java 2014-02-20 00:04:21 UTC (rev 7853) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java 2014-02-20 00:07:56 UTC (rev 7854) @@ -36,8 +36,6 @@ import com.bigdata.rdf.graph.IGraphAccessor; import com.bigdata.rdf.graph.impl.GASEngine; import com.bigdata.rdf.graph.impl.util.VertexDistribution; -import 
com.bigdata.rdf.internal.IV; -import com.bigdata.rdf.store.AbstractTripleStore; import cutthecrap.utils.striterators.EmptyIterator; import cutthecrap.utils.striterators.IStriterator; Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphLoader.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphLoader.java 2014-02-20 00:04:21 UTC (rev 7853) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphLoader.java 2014-02-20 00:07:56 UTC (rev 7854) @@ -17,6 +17,7 @@ import org.openrdf.model.Resource; import org.openrdf.model.Statement; +import org.openrdf.model.ValueFactory; import org.openrdf.rio.RDFHandlerException; import com.bigdata.rdf.graph.impl.ram.RAMGASEngine.RAMGraph; @@ -61,5 +62,10 @@ } } + + @Override + protected ValueFactory getValueFactory() { + return g.getValueFactory(); + } } Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GraphLoader.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GraphLoader.java 2014-02-20 00:04:21 UTC (rev 7853) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GraphLoader.java 2014-02-20 00:07:56 UTC (rev 7854) @@ -31,6 +31,7 @@ import org.apache.log4j.Logger; import org.openrdf.model.Resource; import org.openrdf.model.Statement; +import org.openrdf.model.ValueFactory; import org.openrdf.rio.RDFFormat; import org.openrdf.rio.RDFHandlerException; import org.openrdf.rio.RDFParseException; @@ -95,6 +96,12 @@ } /** + * Return the {@link ValueFactory} that will be set on the {@link RDFParser} + * . This is necessary for the RDR parser. + */ + abstract protected ValueFactory getValueFactory(); + + /** * Load a resource from the classpath, the file system, or a URI. GZ * compressed files are decompressed. Directories are processed recursively. 
* The entries in a ZIP archive are processed. Resources that are not @@ -280,6 +287,14 @@ rdfParser.setStopAtFirstError(false); + final ValueFactory vf = getValueFactory(); + + if (vf != null) { + + rdfParser.setValueFactory(vf); + + } + final AddStatementHandler h = newStatementHandler(); rdfParser.setRDFHandler(h); @@ -332,6 +347,7 @@ this.defaultContext = new Resource[0]; } + @Override public void handleStatement(final Statement stmt) throws RDFHandlerException { Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/SailGraphLoader.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/SailGraphLoader.java 2014-02-20 00:04:21 UTC (rev 7853) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/util/SailGraphLoader.java 2014-02-20 00:07:56 UTC (rev 7854) @@ -17,6 +17,7 @@ import org.openrdf.model.Resource; import org.openrdf.model.Statement; +import org.openrdf.model.ValueFactory; import org.openrdf.rio.RDFHandlerException; import org.openrdf.sail.SailConnection; import org.openrdf.sail.SailException; @@ -75,4 +76,11 @@ } + @Override + protected ValueFactory getValueFactory() { + + return null; + + } + } Added: branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallWeightedGraph.ttl =================================================================== --- branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallWeightedGraph.ttl (rev 0) +++ branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallWeightedGraph.ttl 2014-02-20 00:07:56 UTC (rev 7854) @@ -0,0 +1,24 @@ +# A graph using the RDR syntax to express link weights. +# +@prefix bd: <http://www.bigdata.com/> . +@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . +@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . +@prefix foaf: <http://xmlns.com/foaf/0.1/> . + + bd:1 foaf:knows bd:2 . +<<bd:1 foaf:knows bd:2 >> bd:weight "100"^^xsd:int . + + bd:1 foaf:knows bd:3 . 
+<<bd:1 foaf:knows bd:3 >> bd:weight "100"^^xsd:int . + + bd:2 foaf:knows bd:4 . +<<bd:2 foaf:knows bd:4 >> bd:weight "50"^^xsd:int . + + bd:3 foaf:knows bd:4 . +<<bd:3 foaf:knows bd:4 >> bd:weight "100"^^xsd:int . + + bd:3 foaf:knows bd:5 . +<<bd:3 foaf:knows bd:5 >> bd:weight "100"^^xsd:int . + + bd:4 foaf:knows bd:5 . +<<bd:4 foaf:knows bd:5 >> bd:weight "25"^^xsd:int . Added: branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/weightedSmallGraph.png =================================================================== (Binary files differ) Index: branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/weightedSmallGraph.png =================================================================== --- branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/weightedSmallGraph.png 2014-02-20 00:04:21 UTC (rev 7853) +++ branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/weightedSmallGraph.png 2014-02-20 00:07:56 UTC (rev 7854) Property changes on: branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/weightedSmallGraph.png ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASRunner.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASRunner.java 2014-02-20 00:04:21 UTC (rev 7853) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASRunner.java 2014-02-20 00:07:56 UTC (rev 7854) @@ -21,9 +21,9 @@ import com.bigdata.rdf.graph.IGASProgram; import com.bigdata.rdf.graph.IGraphAccessor; import com.bigdata.rdf.graph.impl.bd.BigdataGASEngine.BigdataGraphAccessor; +import com.bigdata.rdf.graph.impl.bd.BigdataGraphFixture.BigdataSailGraphLoader; import com.bigdata.rdf.graph.impl.util.GASRunnerBase; import 
com.bigdata.rdf.graph.util.GraphLoader; -import com.bigdata.rdf.graph.util.SailGraphLoader; import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.store.AbstractTripleStore; @@ -517,7 +517,7 @@ boolean ok = false; final SailConnection cxn = sail.getUnisolatedConnection(); try { - final GraphLoader loader = new SailGraphLoader(cxn); + final GraphLoader loader = new BigdataSailGraphLoader(cxn); for (String f : loadSet) { loader.loadGraph(null/* fallback */, f/* resource */); } Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGraphFixture.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGraphFixture.java 2014-02-20 00:04:21 UTC (rev 7853) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGraphFixture.java 2014-02-20 00:07:56 UTC (rev 7854) @@ -2,13 +2,16 @@ import java.util.Properties; +import org.openrdf.model.ValueFactory; import org.openrdf.sail.SailConnection; import org.openrdf.sail.SailException; import com.bigdata.rdf.graph.IGraphAccessor; import com.bigdata.rdf.graph.impl.bd.BigdataGASEngine.BigdataGraphAccessor; import com.bigdata.rdf.graph.util.AbstractGraphFixture; +import com.bigdata.rdf.graph.util.SailGraphLoader; import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.store.AbstractTripleStore; public class BigdataGraphFixture extends AbstractGraphFixture { @@ -58,6 +61,13 @@ } @Override + protected SailGraphLoader newSailGraphLoader(SailConnection cxn) { + + return new BigdataSailGraphLoader(cxn); + + } + + @Override public BigdataGASEngine newGASEngine(final int nthreads) { return new BigdataGASEngine(sail, nthreads); @@ -71,5 +81,28 @@ .getIndexManager()); } + + public static class BigdataSailGraphLoader extends SailGraphLoader { + private final ValueFactory valueFactory; + + public 
BigdataSailGraphLoader(SailConnection cxn) { + + super(cxn); + + // Note: Needed for RDR. + this.valueFactory = ((BigdataSailConnection) cxn).getBigdataSail() + .getValueFactory(); + + } + + @Override + protected ValueFactory getValueFactory() { + + return valueFactory; + + } + + } + } \ No newline at end of file Modified: branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/AbstractBigdataGraphTestCase.java =================================================================== --- branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/AbstractBigdataGraphTestCase.java 2014-02-20 00:04:21 UTC (rev 7853) +++ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/AbstractBigdataGraphTestCase.java 2014-02-20 00:07:56 UTC (rev 7854) @@ -66,7 +66,7 @@ } - private Properties getProperties() { + protected Properties getProperties() { final Properties p = new Properties(); @@ -190,4 +190,93 @@ } + /** + * A small weighted graph data set. + * + * @see {@value #smallWeightedGraph} + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + protected class SmallWeightedGraphProblem { + + /** + * The data file. 
+ */ + static private final String smallWeightedGraph = "bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallWeightedGraph.ttl"; + + private final BigdataURI foafKnows, v1, v2, v3, v4, v5; + + public SmallWeightedGraphProblem() throws Exception { + + getGraphFixture().loadGraph(smallWeightedGraph); + + final BigdataSail sail = getGraphFixture().getSail(); + + final ValueFactory vf = sail.getValueFactory(); + + foafKnows = (BigdataURI) vf + .createURI("http://xmlns.com/foaf/0.1/knows"); + + v1 = (BigdataURI) vf.createURI("http://www.bigdata.com/1"); + v2 = (BigdataURI) vf.createURI("http://www.bigdata.com/2"); + v3 = (BigdataURI) vf.createURI("http://www.bigdata.com/3"); + v4 = (BigdataURI) vf.createURI("http://www.bigdata.com/4"); + v5 = (BigdataURI) vf.createURI("http://www.bigdata.com/5"); + + final BigdataValue[] terms = new BigdataValue[] { foafKnows, v1, + v2, v3, v4, v5 }; + + // batch resolve existing IVs. + ((BigdataSail) sail).getDatabase().getLexiconRelation() + .addTerms(terms, terms.length, true/* readOnly */); + + for (BigdataValue v : terms) { + if (v.getIV() == null) + fail("Did not resolve: " + v); + } + + } + + @SuppressWarnings("rawtypes") + public IV getFoafKnows() { + return foafKnows.getIV(); + } + + @SuppressWarnings("rawtypes") + public IV getV1() { + return v1.getIV(); + } + + @SuppressWarnings("rawtypes") + public IV getV2() { + return v2.getIV(); + } + + @SuppressWarnings("rawtypes") + public IV getV3() { + return v3.getIV(); + } + + @SuppressWarnings("rawtypes") + public IV getV4() { + return v4.getIV(); + } + + @SuppressWarnings("rawtypes") + public IV getV5() { + return v5.getIV(); + } + + + } + + /** + * Load and setup the {@link SmallWeightedGraphProblem}. 
+ */ + protected SmallWeightedGraphProblem setupSmallWeightedGraphProblem() throws Exception { + + return new SmallWeightedGraphProblem(); + + } + } Modified: branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestSSSP.java =================================================================== --- branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestSSSP.java 2014-02-20 00:04:21 UTC (rev 7853) +++ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestSSSP.java 2014-02-20 00:07:56 UTC (rev 7854) @@ -82,5 +82,49 @@ } } + + /** + * A unit test based on graph with link weights. + */ + public void test_sssp_weightedGraph() throws Exception { + + final SmallWeightedGraphProblem p = setupSmallWeightedGraphProblem(); + final IGASEngine gasEngine = getGraphFixture() + .newGASEngine(1/* nthreads */); + + try { + + final IGraphAccessor graphAccessor = getGraphFixture() + .newGraphAccessor(null/* ignored */); + + final IGASContext<SSSP.VS, SSSP.ES, Integer> gasContext = gasEngine + .newGASContext(graphAccessor, new SSSP()); + + final IGASState<SSSP.VS, SSSP.ES, Integer> gasState = gasContext.getGASState(); + + // Initialize the froniter. + gasState.setFrontier(gasContext, p.getV1()); + + // Converge. + gasContext.call(); + + assertEquals(0, gasState.getState(p.getV1()).dist()); + + assertEquals(100, gasState.getState(p.getV2()).dist()); + + assertEquals(100, gasState.getState(p.getV3()).dist()); + + assertEquals(125, gasState.getState(p.getV4()).dist()); + + assertEquals(125, gasState.getState(p.getV5()).dist()); + + } finally { + + gasEngine.shutdownNow(); + + } + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-02-20 00:04:25
|
Revision: 7853 http://sourceforge.net/p/bigdata/code/7853 Author: thompsonbry Date: 2014-02-20 00:04:21 +0000 (Thu, 20 Feb 2014) Log Message: ----------- imports. added useful exception for people who forget to set the value factory. Modified Paths: -------------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleParser.java Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleParser.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleParser.java 2014-02-20 00:02:00 UTC (rev 7852) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleParser.java 2014-02-20 00:04:21 UTC (rev 7853) @@ -14,8 +14,6 @@ import org.openrdf.model.URI; import org.openrdf.model.Value; import org.openrdf.model.ValueFactory; -import org.openrdf.model.impl.ValueFactoryImpl; -import org.openrdf.rio.RDFHandlerException; import org.openrdf.rio.RDFParseException; import org.openrdf.rio.turtle.TurtleParser; import org.openrdf.rio.turtle.TurtleUtil; @@ -44,1137 +42,1146 @@ */ public class BigdataTurtleParser extends TurtleParser { -// /*-----------* -// * Variables * -// *-----------*/ +// /*-----------* +// * Variables * +// *-----------*/ // -// private LineNumberReader lineReader; +// private LineNumberReader lineReader; // -// private PushbackReader reader; +// private PushbackReader reader; // -// private Resource subject; +// private Resource subject; // -// private URI predicate; +// private URI predicate; // -// private Value object; +// private Value object; // -// /*--------------* -// * Constructors * -// *--------------*/ +// /*--------------* +// * Constructors * +// *--------------*/ // -// /** -// * Creates a new TurtleParser that will use a {@link ValueFactoryImpl} to -// * create RDF model objects. 
-// */ -// public BigdataTurtleParser() { -// this(null); -// } +// /** +// * Creates a new TurtleParser that will use a {@link ValueFactoryImpl} to +// * create RDF model objects. +// */ +// public BigdataTurtleParser() { +// this(null); +// } - private BigdataValueFactory valueFactory; - -// /** -// * Creates a new TurtleParser that will use the supplied ValueFactory to -// * create RDF model objects. -// * -// * @param valueFactory -// * A ValueFactory. -// */ -// public BigdataTurtleParser(ValueFactory valueFactory) { -// super(valueFactory); -// -// if (valueFactory instanceof BigdataValueFactory) -// this.valueFactory = (BigdataValueFactory) valueFactory; -// else -// this.valueFactory = null; -// } - - @Override - public void setValueFactory(ValueFactory valueFactory) { - super.setValueFactory(valueFactory); - - if (valueFactory instanceof BigdataValueFactory) - this.valueFactory = (BigdataValueFactory) valueFactory; - else - this.valueFactory = null; - } + private BigdataValueFactory valueFactory; + +// /** +// * Creates a new TurtleParser that will use the supplied ValueFactory to +// * create RDF model objects. +// * +// * @param valueFactory +// * A ValueFactory. 
+// */ +// public BigdataTurtleParser(ValueFactory valueFactory) { +// super(valueFactory); +// +// if (valueFactory instanceof BigdataValueFactory) +// this.valueFactory = (BigdataValueFactory) valueFactory; +// else +// this.valueFactory = null; +// } + + @Override + public void setValueFactory(ValueFactory valueFactory) { + super.setValueFactory(valueFactory); + + if (valueFactory instanceof BigdataValueFactory) + this.valueFactory = (BigdataValueFactory) valueFactory; + else + this.valueFactory = null; + } // -// /*---------* -// * Methods * -// *---------*/ +// /*---------* +// * Methods * +// *---------*/ // -// public RDFFormat getRDFFormat() { -// return RDFFormat.TURTLE; -// } +// public RDFFormat getRDFFormat() { +// return RDFFormat.TURTLE; +// } // -// /** -// * Implementation of the <tt>parse(InputStream, String)</tt> method defined -// * in the RDFParser interface. -// * -// * @param in -// * The InputStream from which to read the data, must not be -// * <tt>null</tt>. The InputStream is supposed to contain UTF-8 encoded -// * Unicode characters, as per the Turtle specification. -// * @param baseURI -// * The URI associated with the data in the InputStream, must not be -// * <tt>null</tt>. -// * @throws IOException -// * If an I/O error occurred while data was read from the InputStream. -// * @throws RDFParseException -// * If the parser has found an unrecoverable parse error. -// * @throws RDFHandlerException -// * If the configured statement handler encountered an unrecoverable -// * error. -// * @throws IllegalArgumentException -// * If the supplied input stream or base URI is <tt>null</tt>. 
-// */ -// public synchronized void parse(InputStream in, String baseURI) -// throws IOException, RDFParseException, RDFHandlerException -// { -// if (in == null) { -// throw new IllegalArgumentException("Input stream must not be 'null'"); -// } -// // Note: baseURI will be checked in parse(Reader, String) +// /** +// * Implementation of the <tt>parse(InputStream, String)</tt> method defined +// * in the RDFParser interface. +// * +// * @param in +// * The InputStream from which to read the data, must not be +// * <tt>null</tt>. The InputStream is supposed to contain UTF-8 encoded +// * Unicode characters, as per the Turtle specification. +// * @param baseURI +// * The URI associated with the data in the InputStream, must not be +// * <tt>null</tt>. +// * @throws IOException +// * If an I/O error occurred while data was read from the InputStream. +// * @throws RDFParseException +// * If the parser has found an unrecoverable parse error. +// * @throws RDFHandlerException +// * If the configured statement handler encountered an unrecoverable +// * error. +// * @throws IllegalArgumentException +// * If the supplied input stream or base URI is <tt>null</tt>. +// */ +// public synchronized void parse(InputStream in, String baseURI) +// throws IOException, RDFParseException, RDFHandlerException +// { +// if (in == null) { +// throw new IllegalArgumentException("Input stream must not be 'null'"); +// } +// // Note: baseURI will be checked in parse(Reader, String) // -// try { -// parse(new InputStreamReader(in, "UTF-8"), baseURI); -// } -// catch (UnsupportedEncodingException e) { -// // Every platform should support the UTF-8 encoding... -// throw new RuntimeException(e); -// } -// } +// try { +// parse(new InputStreamReader(in, "UTF-8"), baseURI); +// } +// catch (UnsupportedEncodingException e) { +// // Every platform should support the UTF-8 encoding... 
+// throw new RuntimeException(e); +// } +// } // -// /** -// * Implementation of the <tt>parse(Reader, String)</tt> method defined in the -// * RDFParser interface. -// * -// * @param reader -// * The Reader from which to read the data, must not be <tt>null</tt>. -// * @param baseURI -// * The URI associated with the data in the Reader, must not be -// * <tt>null</tt>. -// * @throws IOException -// * If an I/O error occurred while data was read from the InputStream. -// * @throws RDFParseException -// * If the parser has found an unrecoverable parse error. -// * @throws RDFHandlerException -// * If the configured statement handler encountered an unrecoverable -// * error. -// * @throws IllegalArgumentException -// * If the supplied reader or base URI is <tt>null</tt>. -// */ -// public synchronized void parse(Reader reader, String baseURI) -// throws IOException, RDFParseException, RDFHandlerException -// { -// if (reader == null) { -// throw new IllegalArgumentException("Reader must not be 'null'"); -// } -// if (baseURI == null) { -// throw new IllegalArgumentException("base URI must not be 'null'"); -// } +// /** +// * Implementation of the <tt>parse(Reader, String)</tt> method defined in the +// * RDFParser interface. +// * +// * @param reader +// * The Reader from which to read the data, must not be <tt>null</tt>. +// * @param baseURI +// * The URI associated with the data in the Reader, must not be +// * <tt>null</tt>. +// * @throws IOException +// * If an I/O error occurred while data was read from the InputStream. +// * @throws RDFParseException +// * If the parser has found an unrecoverable parse error. +// * @throws RDFHandlerException +// * If the configured statement handler encountered an unrecoverable +// * error. +// * @throws IllegalArgumentException +// * If the supplied reader or base URI is <tt>null</tt>. 
+// */ +// public synchronized void parse(Reader reader, String baseURI) +// throws IOException, RDFParseException, RDFHandlerException +// { +// if (reader == null) { +// throw new IllegalArgumentException("Reader must not be 'null'"); +// } +// if (baseURI == null) { +// throw new IllegalArgumentException("base URI must not be 'null'"); +// } // -// rdfHandler.startRDF(); +// rdfHandler.startRDF(); // -// lineReader = new LineNumberReader(reader); -// // Start counting lines at 1: -// lineReader.setLineNumber(1); +// lineReader = new LineNumberReader(reader); +// // Start counting lines at 1: +// lineReader.setLineNumber(1); // -// // Allow at most 2 characters to be pushed back: -// this.reader = new PushbackReader(lineReader, 2); +// // Allow at most 2 characters to be pushed back: +// this.reader = new PushbackReader(lineReader, 2); // -// // Store normalized base URI -// setBaseURI(baseURI); +// // Store normalized base URI +// setBaseURI(baseURI); // -// reportLocation(); +// reportLocation(); // -// try { -// int c = skipWSC(); +// try { +// int c = skipWSC(); // -// while (c != -1) { -// parseStatement(); -// c = skipWSC(); -// } -// } -// finally { -// clear(); -// } +// while (c != -1) { +// parseStatement(); +// c = skipWSC(); +// } +// } +// finally { +// clear(); +// } // -// rdfHandler.endRDF(); -// } +// rdfHandler.endRDF(); +// } // -// protected void parseStatement() -// throws IOException, RDFParseException, RDFHandlerException -// { -// int c = peek(); +// protected void parseStatement() +// throws IOException, RDFParseException, RDFHandlerException +// { +// int c = peek(); // -// if (c == '@') { -// parseDirective(); -// skipWSC(); -// verifyCharacter(read(), "."); -// } -// else { -// parseTriples(); -// skipWSC(); -// verifyCharacter(read(), "."); -// } -// } +// if (c == '@') { +// parseDirective(); +// skipWSC(); +// verifyCharacter(read(), "."); +// } +// else { +// parseTriples(); +// skipWSC(); +// verifyCharacter(read(), "."); +// } 
+// } // -// protected void parseDirective() -// throws IOException, RDFParseException, RDFHandlerException -// { -// // Verify that the first characters form the string "prefix" -// verifyCharacter(read(), "@"); +// protected void parseDirective() +// throws IOException, RDFParseException, RDFHandlerException +// { +// // Verify that the first characters form the string "prefix" +// verifyCharacter(read(), "@"); // -// StringBuilder sb = new StringBuilder(8); +// StringBuilder sb = new StringBuilder(8); // -// int c = read(); -// while (c != -1 && !TurtleUtil.isWhitespace(c)) { -// sb.append((char)c); -// c = read(); -// } +// int c = read(); +// while (c != -1 && !TurtleUtil.isWhitespace(c)) { +// sb.append((char)c); +// c = read(); +// } // -// String directive = sb.toString(); -// if (directive.equals("prefix")) { -// parsePrefixID(); -// } -// else if (directive.equals("base")) { -// parseBase(); -// } -// else if (directive.length() == 0) { -// reportFatalError("Directive name is missing, expected @prefix or @base"); -// } -// else { -// reportFatalError("Unknown directive \"@" + directive + "\""); -// } -// } +// String directive = sb.toString(); +// if (directive.equals("prefix")) { +// parsePrefixID(); +// } +// else if (directive.equals("base")) { +// parseBase(); +// } +// else if (directive.length() == 0) { +// reportFatalError("Directive name is missing, expected @prefix or @base"); +// } +// else { +// reportFatalError("Unknown directive \"@" + directive + "\""); +// } +// } // -// protected void parsePrefixID() -// throws IOException, RDFParseException, RDFHandlerException -// { -// skipWSC(); +// protected void parsePrefixID() +// throws IOException, RDFParseException, RDFHandlerException +// { +// skipWSC(); // -// // Read prefix ID (e.g. "rdf:" or ":") -// StringBuilder prefixID = new StringBuilder(8); +// // Read prefix ID (e.g. 
"rdf:" or ":") +// StringBuilder prefixID = new StringBuilder(8); // -// while (true) { -// int c = read(); +// while (true) { +// int c = read(); // -// if (c == ':') { -// unread(c); -// break; -// } -// else if (TurtleUtil.isWhitespace(c)) { -// break; -// } -// else if (c == -1) { -// throwEOFException(); -// } +// if (c == ':') { +// unread(c); +// break; +// } +// else if (TurtleUtil.isWhitespace(c)) { +// break; +// } +// else if (c == -1) { +// throwEOFException(); +// } // -// prefixID.append((char)c); -// } +// prefixID.append((char)c); +// } // -// skipWSC(); +// skipWSC(); // -// verifyCharacter(read(), ":"); +// verifyCharacter(read(), ":"); // -// skipWSC(); +// skipWSC(); // -// // Read the namespace URI -// URI namespace = parseURI(); +// // Read the namespace URI +// URI namespace = parseURI(); // -// // Store and report this namespace mapping -// String prefixStr = prefixID.toString(); -// String namespaceStr = namespace.toString(); +// // Store and report this namespace mapping +// String prefixStr = prefixID.toString(); +// String namespaceStr = namespace.toString(); // -// setNamespace(prefixStr, namespaceStr); +// setNamespace(prefixStr, namespaceStr); // -// rdfHandler.handleNamespace(prefixStr, namespaceStr); -// } +// rdfHandler.handleNamespace(prefixStr, namespaceStr); +// } // -// protected void parseBase() -// throws IOException, RDFParseException, RDFHandlerException -// { -// skipWSC(); +// protected void parseBase() +// throws IOException, RDFParseException, RDFHandlerException +// { +// skipWSC(); // -// URI baseURI = parseURI(); +// URI baseURI = parseURI(); // -// setBaseURI(baseURI.toString()); -// } +// setBaseURI(baseURI.toString()); +// } // -// protected void parseTriples() -// throws IOException, RDFParseException, RDFHandlerException -// { -// parseSubject(); -// skipWSC(); -// parsePredicateObjectList(); +// protected void parseTriples() +// throws IOException, RDFParseException, RDFHandlerException +// { +// 
parseSubject(); +// skipWSC(); +// parsePredicateObjectList(); // -// subject = null; -// predicate = null; -// object = null; -// } +// subject = null; +// predicate = null; +// object = null; +// } // -// protected void parsePredicateObjectList() -// throws IOException, RDFParseException, RDFHandlerException -// { -// predicate = parsePredicate(); +// protected void parsePredicateObjectList() +// throws IOException, RDFParseException, RDFHandlerException +// { +// predicate = parsePredicate(); // -// skipWSC(); +// skipWSC(); // -// parseObjectList(); +// parseObjectList(); // -// while (skipWSC() == ';') { -// read(); +// while (skipWSC() == ';') { +// read(); // -// int c = skipWSC(); +// int c = skipWSC(); // -// if (c == '.' || // end of triple -// c == ']') // end of predicateObjectList inside blank node -// { -// break; -// } +// if (c == '.' || // end of triple +// c == ']') // end of predicateObjectList inside blank node +// { +// break; +// } // -// predicate = parsePredicate(); +// predicate = parsePredicate(); // -// skipWSC(); +// skipWSC(); // -// parseObjectList(); -// } -// } +// parseObjectList(); +// } +// } // -// protected void parseObjectList() -// throws IOException, RDFParseException, RDFHandlerException -// { -// parseObject(); +// protected void parseObjectList() +// throws IOException, RDFParseException, RDFHandlerException +// { +// parseObject(); // -// while (skipWSC() == ',') { -// read(); -// skipWSC(); -// parseObject(); -// } -// } +// while (skipWSC() == ',') { +// read(); +// skipWSC(); +// parseObject(); +// } +// } // -// protected void parseSubject() -// throws IOException, RDFParseException, RDFHandlerException -// { -// int c = peek(); +// protected void parseSubject() +// throws IOException, RDFParseException, RDFHandlerException +// { +// int c = peek(); // -// if (c == '(') { -// subject = parseCollection(); -// } -// else if (c == '[') { -// subject = parseImplicitBlank(); -// } -// else { -// Value value = parseValue(); 
+// if (c == '(') { +// subject = parseCollection(); +// } +// else if (c == '[') { +// subject = parseImplicitBlank(); +// } +// else { +// Value value = parseValue(); // -// if (value instanceof Resource) { -// subject = (Resource)value; -// } -// else { -// reportFatalError("Illegal subject value: " + value); -// } -// } -// } +// if (value instanceof Resource) { +// subject = (Resource)value; +// } +// else { +// reportFatalError("Illegal subject value: " + value); +// } +// } +// } // -// protected URI parsePredicate() -// throws IOException, RDFParseException -// { -// // Check if the short-cut 'a' is used -// int c1 = read(); +// protected URI parsePredicate() +// throws IOException, RDFParseException +// { +// // Check if the short-cut 'a' is used +// int c1 = read(); // -// if (c1 == 'a') { -// int c2 = read(); +// if (c1 == 'a') { +// int c2 = read(); // -// if (TurtleUtil.isWhitespace(c2)) { -// // Short-cut is used, return the rdf:type URI -// return RDF.TYPE; -// } +// if (TurtleUtil.isWhitespace(c2)) { +// // Short-cut is used, return the rdf:type URI +// return RDF.TYPE; +// } // -// // Short-cut is not used, unread all characters -// unread(c2); -// } -// unread(c1); +// // Short-cut is not used, unread all characters +// unread(c2); +// } +// unread(c1); // -// // Predicate is a normal resource -// Value predicate = parseValue(); -// if (predicate instanceof URI) { -// return (URI)predicate; -// } -// else { -// reportFatalError("Illegal predicate value: " + predicate); -// return null; -// } -// } +// // Predicate is a normal resource +// Value predicate = parseValue(); +// if (predicate instanceof URI) { +// return (URI)predicate; +// } +// else { +// reportFatalError("Illegal predicate value: " + predicate); +// return null; +// } +// } // -// protected void parseObject() -// throws IOException, RDFParseException, RDFHandlerException -// { -// int c = peek(); +// protected void parseObject() +// throws IOException, RDFParseException, 
RDFHandlerException +// { +// int c = peek(); // -// if (c == '(') { -// object = parseCollection(); -// } -// else if (c == '[') { -// object = parseImplicitBlank(); -// } -// else { -// object = parseValue(); -// } +// if (c == '(') { +// object = parseCollection(); +// } +// else if (c == '[') { +// object = parseImplicitBlank(); +// } +// else { +// object = parseValue(); +// } // -// reportStatement(subject, predicate, object); -// } +// reportStatement(subject, predicate, object); +// } // -// /** -// * Parses a collection, e.g. <tt>( item1 item2 item3 )</tt>. -// */ -// protected Resource parseCollection() -// throws IOException, RDFParseException, RDFHandlerException -// { -// verifyCharacter(read(), "("); +// /** +// * Parses a collection, e.g. <tt>( item1 item2 item3 )</tt>. +// */ +// protected Resource parseCollection() +// throws IOException, RDFParseException, RDFHandlerException +// { +// verifyCharacter(read(), "("); // -// int c = skipWSC(); +// int c = skipWSC(); // -// if (c == ')') { -// // Empty list -// read(); -// return RDF.NIL; -// } -// else { -// BNode listRoot = createBNode(); +// if (c == ')') { +// // Empty list +// read(); +// return RDF.NIL; +// } +// else { +// BNode listRoot = createBNode(); // -// // Remember current subject and predicate -// Resource oldSubject = subject; -// URI oldPredicate = predicate; +// // Remember current subject and predicate +// Resource oldSubject = subject; +// URI oldPredicate = predicate; // -// // generated bNode becomes subject, predicate becomes rdf:first -// subject = listRoot; -// predicate = RDF.FIRST; +// // generated bNode becomes subject, predicate becomes rdf:first +// subject = listRoot; +// predicate = RDF.FIRST; // -// parseObject(); +// parseObject(); // -// BNode bNode = listRoot; +// BNode bNode = listRoot; // -// while (skipWSC() != ')') { -// // Create another list node and link it to the previous -// BNode newNode = createBNode(); -// reportStatement(bNode, RDF.REST, newNode); +// 
while (skipWSC() != ')') { +// // Create another list node and link it to the previous +// BNode newNode = createBNode(); +// reportStatement(bNode, RDF.REST, newNode); // -// // New node becomes the current -// subject = bNode = newNode; +// // New node becomes the current +// subject = bNode = newNode; // -// parseObject(); -// } +// parseObject(); +// } // -// // Skip ')' -// read(); +// // Skip ')' +// read(); // -// // Close the list -// reportStatement(bNode, RDF.REST, RDF.NIL); +// // Close the list +// reportStatement(bNode, RDF.REST, RDF.NIL); // -// // Restore previous subject and predicate -// subject = oldSubject; -// predicate = oldPredicate; +// // Restore previous subject and predicate +// subject = oldSubject; +// predicate = oldPredicate; // -// return listRoot; -// } -// } +// return listRoot; +// } +// } // -// /** -// * Parses an implicit blank node. This method parses the token <tt>[]</tt> -// * and predicateObjectLists that are surrounded by square brackets. -// */ -// protected Resource parseImplicitBlank() -// throws IOException, RDFParseException, RDFHandlerException -// { -// verifyCharacter(read(), "["); +// /** +// * Parses an implicit blank node. This method parses the token <tt>[]</tt> +// * and predicateObjectLists that are surrounded by square brackets. 
+// */ +// protected Resource parseImplicitBlank() +// throws IOException, RDFParseException, RDFHandlerException +// { +// verifyCharacter(read(), "["); // -// BNode bNode = createBNode(); +// BNode bNode = createBNode(); // -// int c = read(); -// if (c != ']') { -// unread(c); +// int c = read(); +// if (c != ']') { +// unread(c); // -// // Remember current subject and predicate -// Resource oldSubject = subject; -// URI oldPredicate = predicate; +// // Remember current subject and predicate +// Resource oldSubject = subject; +// URI oldPredicate = predicate; // -// // generated bNode becomes subject -// subject = bNode; +// // generated bNode becomes subject +// subject = bNode; // -// // Enter recursion with nested predicate-object list -// skipWSC(); +// // Enter recursion with nested predicate-object list +// skipWSC(); // -// parsePredicateObjectList(); +// parsePredicateObjectList(); // -// skipWSC(); +// skipWSC(); // -// // Read closing bracket -// verifyCharacter(read(), "]"); +// // Read closing bracket +// verifyCharacter(read(), "]"); // -// // Restore previous subject and predicate -// subject = oldSubject; -// predicate = oldPredicate; -// } +// // Restore previous subject and predicate +// subject = oldSubject; +// predicate = oldPredicate; +// } // -// return bNode; -// } +// return bNode; +// } // - /** - * Parses an RDF value. This method parses uriref, qname, node ID, quoted - * literal, integer, double and boolean. - */ - protected Value parseValue() - throws IOException, RDFParseException - { - int c = peek(); - - if (c == '<') { - // uriref, e.g. <foo://bar> or sidref <<a> <b> <c>> - return parseURIOrSid(); - } - else if (c == ':' || TurtleUtil.isPrefixStartChar(c)) { - // qname or boolean - return parseQNameOrBoolean(); - } - else if (c == '_') { - // node ID, e.g. _:n1 - return parseNodeID(); - } - else if (c == '"') { - // quoted literal, e.g. 
"foo" or """foo""" - return parseQuotedLiteral(); - } - else if (ASCIIUtil.isNumber(c) || c == '.' || c == '+' || c == '-') { - // integer or double, e.g. 123 or 1.2e3 - return parseNumber(); - } - else if (c == -1) { - throwEOFException(); - return null; - } - else { - Thread.dumpStack(); - while (c != -1) System.err.print((char) (c = read())); - reportFatalError("Expected an RDF value here, found '" + (char)c + "'"); - return null; - } - } + /** + * Parses an RDF value. This method parses uriref, qname, node ID, quoted + * literal, integer, double and boolean. + */ + protected Value parseValue() + throws IOException, RDFParseException + { + int c = peek(); + + if (c == '<') { + // uriref, e.g. <foo://bar> or sidref <<a> <b> <c>> + return parseURIOrSid(); + } + else if (c == ':' || TurtleUtil.isPrefixStartChar(c)) { + // qname or boolean + return parseQNameOrBoolean(); + } + else if (c == '_') { + // node ID, e.g. _:n1 + return parseNodeID(); + } + else if (c == '"') { + // quoted literal, e.g. "foo" or """foo""" + return parseQuotedLiteral(); + } + else if (ASCIIUtil.isNumber(c) || c == '.' || c == '+' || c == '-') { + // integer or double, e.g. 123 or 1.2e3 + return parseNumber(); + } + else if (c == -1) { + throwEOFException(); + return null; + } + else { + Thread.dumpStack(); + while (c != -1) System.err.print((char) (c = read())); + reportFatalError("Expected an RDF value here, found '" + (char)c + "'"); + return null; + } + } // -// /** -// * Parses a quoted string, optionally followed by a language tag or datatype. -// */ -// protected Literal parseQuotedLiteral() -// throws IOException, RDFParseException -// { -// String label = parseQuotedString(); +// /** +// * Parses a quoted string, optionally followed by a language tag or datatype. 
+// */ +// protected Literal parseQuotedLiteral() +// throws IOException, RDFParseException +// { +// String label = parseQuotedString(); // -// // Check for presence of a language tag or datatype -// int c = peek(); +// // Check for presence of a language tag or datatype +// int c = peek(); // -// if (c == '@') { -// read(); +// if (c == '@') { +// read(); // -// // Read language -// StringBuilder lang = new StringBuilder(8); +// // Read language +// StringBuilder lang = new StringBuilder(8); // -// c = read(); -// if (c == -1) { -// throwEOFException(); -// } -// if (!TurtleUtil.isLanguageStartChar(c)) { -// reportError("Expected a letter, found '" + (char)c + "'"); -// } +// c = read(); +// if (c == -1) { +// throwEOFException(); +// } +// if (!TurtleUtil.isLanguageStartChar(c)) { +// reportError("Expected a letter, found '" + (char)c + "'"); +// } // -// lang.append((char)c); +// lang.append((char)c); // -// c = read(); -// while (TurtleUtil.isLanguageChar(c)) { -// lang.append((char)c); -// c = read(); -// } +// c = read(); +// while (TurtleUtil.isLanguageChar(c)) { +// lang.append((char)c); +// c = read(); +// } // -// unread(c); +// unread(c); // -// return createLiteral(label, lang.toString(), null); -// } -// else if (c == '^') { -// read(); +// return createLiteral(label, lang.toString(), null); +// } +// else if (c == '^') { +// read(); // -// // next character should be another '^' -// verifyCharacter(read(), "^"); +// // next character should be another '^' +// verifyCharacter(read(), "^"); // -// // Read datatype -// Value datatype = parseValue(); -// if (datatype instanceof URI) { -// return createLiteral(label, null, (URI)datatype); -// } -// else { -// reportFatalError("Illegal datatype value: " + datatype); -// return null; -// } -// } -// else { -// return createLiteral(label, null, null); -// } -// } +// // Read datatype +// Value datatype = parseValue(); +// if (datatype instanceof URI) { +// return createLiteral(label, null, (URI)datatype); 
+// } +// else { +// reportFatalError("Illegal datatype value: " + datatype); +// return null; +// } +// } +// else { +// return createLiteral(label, null, null); +// } +// } // -// /** -// * Parses a quoted string, which is either a "normal string" or a """long -// * string""". -// */ -// protected String parseQuotedString() -// throws IOException, RDFParseException -// { -// String result = null; +// /** +// * Parses a quoted string, which is either a "normal string" or a """long +// * string""". +// */ +// protected String parseQuotedString() +// throws IOException, RDFParseException +// { +// String result = null; // -// // First character should be '"' -// verifyCharacter(read(), "\""); +// // First character should be '"' +// verifyCharacter(read(), "\""); // -// // Check for long-string, which starts and ends with three double quotes -// int c2 = read(); -// int c3 = read(); +// // Check for long-string, which starts and ends with three double quotes +// int c2 = read(); +// int c3 = read(); // -// if (c2 == '"' && c3 == '"') { -// // Long string -// result = parseLongString(); -// } -// else { -// // Normal string -// unread(c3); -// unread(c2); +// if (c2 == '"' && c3 == '"') { +// // Long string +// result = parseLongString(); +// } +// else { +// // Normal string +// unread(c3); +// unread(c2); // -// result = parseString(); -// } +// result = parseString(); +// } // -// // Unescape any escape sequences -// try { -// result = TurtleUtil.decodeString(result); -// } -// catch (IllegalArgumentException e) { -// reportError(e.getMessage()); -// } +// // Unescape any escape sequences +// try { +// result = TurtleUtil.decodeString(result); +// } +// catch (IllegalArgumentException e) { +// reportError(e.getMessage()); +// } // -// return result; -// } +// return result; +// } // -// /** -// * Parses a "normal string". This method assumes that the first double quote -// * has already been parsed. 
-// */ -// protected String parseString() -// throws IOException, RDFParseException -// { -// StringBuilder sb = new StringBuilder(32); +// /** +// * Parses a "normal string". This method assumes that the first double quote +// * has already been parsed. +// */ +// protected String parseString() +// throws IOException, RDFParseException +// { +// StringBuilder sb = new StringBuilder(32); // -// while (true) { -// int c = read(); +// while (true) { +// int c = read(); // -// if (c == '"') { -// break; -// } -// else if (c == -1) { -// throwEOFException(); -// } +// if (c == '"') { +// break; +// } +// else if (c == -1) { +// throwEOFException(); +// } // -// sb.append((char)c); +// sb.append((char)c); // -// if (c == '\\') { -// // This escapes the next character, which might be a '"' -// c = read(); -// if (c == -1) { -// throwEOFException(); -// } -// sb.append((char)c); -// } -// } +// if (c == '\\') { +// // This escapes the next character, which might be a '"' +// c = read(); +// if (c == -1) { +// throwEOFException(); +// } +// sb.append((char)c); +// } +// } // -// return sb.toString(); -// } +// return sb.toString(); +// } // -// /** -// * Parses a """long string""". This method assumes that the first three -// * double quotes have already been parsed. -// */ -// protected String parseLongString() -// throws IOException, RDFParseException -// { -// StringBuilder sb = new StringBuilder(1024); +// /** +// * Parses a """long string""". This method assumes that the first three +// * double quotes have already been parsed. 
+// */ +// protected String parseLongString() +// throws IOException, RDFParseException +// { +// StringBuilder sb = new StringBuilder(1024); // -// int doubleQuoteCount = 0; -// int c; +// int doubleQuoteCount = 0; +// int c; // -// while (doubleQuoteCount < 3) { -// c = read(); +// while (doubleQuoteCount < 3) { +// c = read(); // -// if (c == -1) { -// throwEOFException(); -// } -// else if (c == '"') { -// doubleQuoteCount++; -// } -// else { -// doubleQuoteCount = 0; -// } +// if (c == -1) { +// throwEOFException(); +// } +// else if (c == '"') { +// doubleQuoteCount++; +// } +// else { +// doubleQuoteCount = 0; +// } // -// sb.append((char)c); +// sb.append((char)c); // -// if (c == '\\') { -// // This escapes the next character, which might be a '"' -// c = read(); -// if (c == -1) { -// throwEOFException(); -// } -// sb.append((char)c); -// } -// } +// if (c == '\\') { +// // This escapes the next character, which might be a '"' +// c = read(); +// if (c == -1) { +// throwEOFException(); +// } +// sb.append((char)c); +// } +// } // -// return sb.substring(0, sb.length() - 3); -// } +// return sb.substring(0, sb.length() - 3); +// } // -// protected Literal parseNumber() -// throws IOException, RDFParseException -// { -// StringBuilder value = new StringBuilder(8); -// URI datatype = XMLSchema.INTEGER; +// protected Literal parseNumber() +// throws IOException, RDFParseException +// { +// StringBuilder value = new StringBuilder(8); +// URI datatype = XMLSchema.INTEGER; // -// int c = read(); +// int c = read(); // -// // read optional sign character -// if (c == '+' || c == '-') { -// value.append((char)c); -// c = read(); -// } +// // read optional sign character +// if (c == '+' || c == '-') { +// value.append((char)c); +// c = read(); +// } // -// while (ASCIIUtil.isNumber(c)) { -// value.append((char)c); -// c = read(); -// } +// while (ASCIIUtil.isNumber(c)) { +// value.append((char)c); +// c = read(); +// } // -// if (c == '.' 
|| c == 'e' || c == 'E') { -// // We're parsing a decimal or a double -// datatype = XMLSchema.DECIMAL; +// if (c == '.' || c == 'e' || c == 'E') { +// // We're parsing a decimal or a double +// datatype = XMLSchema.DECIMAL; // -// // read optional fractional digits -// if (c == '.') { -// value.append((char)c); +// // read optional fractional digits +// if (c == '.') { +// value.append((char)c); // -// c = read(); -// while (ASCIIUtil.isNumber(c)) { -// value.append((char)c); -// c = read(); -// } +// c = read(); +// while (ASCIIUtil.isNumber(c)) { +// value.append((char)c); +// c = read(); +// } // -// if (value.length() == 1) { -// // We've only parsed a '.' -// reportFatalError("Object for statement missing"); -// } -// } -// else { -// if (value.length() == 0) { -// // We've only parsed an 'e' or 'E' -// reportFatalError("Object for statement missing"); -// } -// } +// if (value.length() == 1) { +// // We've only parsed a '.' +// reportFatalError("Object for statement missing"); +// } +// } +// else { +// if (value.length() == 0) { +// // We've only parsed an 'e' or 'E' +// reportFatalError("Object for statement missing"); +// } +// } // -// // read optional exponent -// if (c == 'e' || c == 'E') { -// datatype = XMLSchema.DOUBLE; -// value.append((char)c); +// // read optional exponent +// if (c == 'e' || c == 'E') { +// datatype = XMLSchema.DOUBLE; +// value.append((char)c); // -// c = read(); -// if (c == '+' || c == '-') { -// value.append((char)c); -// c = read(); -// } +// c = read(); +// if (c == '+' || c == '-') { +// value.append((char)c); +// c = read(); +// } // -// if (!ASCIIUtil.isNumber(c)) { -// reportError("Exponent value missing"); -// } +// if (!ASCIIUtil.isNumber(c)) { +// reportError("Exponent value missing"); +// } // -// value.append((char)c); +// value.append((char)c); // -// c = read(); -// while (ASCIIUtil.isNumber(c)) { -// value.append((char)c); -// c = read(); -// } -// } -// } +// c = read(); +// while (ASCIIUtil.isNumber(c)) { +// 
value.append((char)c); +// c = read(); +// } +// } +// } // -// // Unread last character, it isn't part of the number -// unread(c); +// // Unread last character, it isn't part of the number +// unread(c); // -// // String label = value.toString(); -// // if (datatype.equals(XMLSchema.INTEGER)) { -// // try { -// // label = XMLDatatypeUtil.normalizeInteger(label); -// // } -// // catch (IllegalArgumentException e) { -// // // Note: this should never happen because of the parse constraints -// // reportError("Illegal integer value: " + label); -// // } -// // } -// // return createLiteral(label, null, datatype); +// // String label = value.toString(); +// // if (datatype.equals(XMLSchema.INTEGER)) { +// // try { +// // label = XMLDatatypeUtil.normalizeInteger(label); +// // } +// // catch (IllegalArgumentException e) { +// // // Note: this should never happen because of the parse constraints +// // reportError("Illegal integer value: " + label); +// // } +// // } +// // return createLiteral(label, null, datatype); // -// // Return result as a typed literal -// return createLiteral(value.toString(), null, datatype); -// } +// // Return result as a typed literal +// return createLiteral(value.toString(), null, datatype); +// } // - protected Value parseURIOrSid() - throws IOException, RDFParseException - { - // First character should be '<' - int c = read(); - verifyCharacter(c, "<"); - - int n = peek(); - if (n == '<') { - read(); - if (this.valueFactory == null) { - reportError("must use a BigdataValueFactory to use the RDR syntax"); - } - return parseSid(); - } else { - unread(c); - return parseURI(); - } - } - - protected Value parseSid() - throws IOException, RDFParseException - { - Resource s = (Resource) parseValue(); - - skipWS(); - URI p = (URI) parseValue(); - - skipWS(); - Value o = parseValue(); + protected Value parseURIOrSid() + throws IOException, RDFParseException + { + // First character should be '<' + int c = read(); + verifyCharacter(c, "<"); + + 
int n = peek(); + if (n == '<') { + read(); + if (this.valueFactory == null) { + reportError("must use a BigdataValueFactory to use the RDR syntax"); + } + return parseSid(); + } else { + unread(c); + return parseURI(); + } + } + + protected Value parseSid() + throws IOException, RDFParseException + { + Resource s = (Resource) parseValue(); + + skipWS(); + URI p = (URI) parseValue(); + + skipWS(); + Value o = parseValue(); - int i = read(); - while (TurtleUtil.isWhitespace(i)) { - i = read(); - } - - if (i == '>' && read() == '>') { - return valueFactory.createBNode(valueFactory.createStatement(s, p, o)); - } else { - reportError("expecting >> to close statement identifier"); - throw new IOException(); - } - - } + int i = read(); + while (TurtleUtil.isWhitespace(i)) { + i = read(); + } + + if (i == '>' && read() == '>') { + if (valueFactory == null) { + /* + * The BigdataValueFactory has an extension to create a BNode + * from a Statement. You need to specify that value factory + * when you create the parser using setValueFactory(). 
+ */ + throw new RDFParseException( + "You must set a ValueFactory to use the RDR syntax"); + } + return valueFactory.createBNode(valueFactory.createStatement(s, p, o)); + } else { + reportError("expecting >> to close statement identifier"); + throw new IOException(); + } + + } // -// protected URI parseURI() -// throws IOException, RDFParseException -// { -// StringBuilder uriBuf = new StringBuilder(100); +// protected URI parseURI() +// throws IOException, RDFParseException +// { +// StringBuilder uriBuf = new StringBuilder(100); // -// // First character should be '<' -// int c = read(); -// verifyCharacter(c, "<"); +// // First character should be '<' +// int c = read(); +// verifyCharacter(c, "<"); // -// // Read up to the next '>' character -// while (true) { -// c = read(); +// // Read up to the next '>' character +// while (true) { +// c = read(); // -// if (c == '>') { -// break; -// } -// else if (c == -1) { -// throwEOFException(); -// } +// if (c == '>') { +// break; +// } +// else if (c == -1) { +// throwEOFException(); +// } // -// uriBuf.append((char)c); +// uriBuf.append((char)c); // -// if (c == '\\') { -// // This escapes the next character, which might be a '>' -// c = read(); -// if (c == -1) { -// throwEOFException(); -// } -// uriBuf.append((char)c); -// } -// } +// if (c == '\\') { +// // This escapes the next character, which might be a '>' +// c = read(); +// if (c == -1) { +// throwEOFException(); +// } +// uriBuf.append((char)c); +// } +// } // -// String uri = uriBuf.toString(); +// String uri = uriBuf.toString(); // -// // Unescape any escape sequences -// try { -// uri = TurtleUtil.decodeString(uri); -// } -// catch (IllegalArgumentException e) { -// reportError(e.getMessage()); -// } +// // Unescape any escape sequences +// try { +// uri = TurtleUtil.decodeString(uri); +// } +// catch (IllegalArgumentException e) { +// reportError(e.getMessage()); +// } // -// return super.resolveURI(uri); -// } +// return super.resolveURI(uri); +// } 
// -// /** -// * Parses qnames and boolean values, which have equivalent starting -// * characters. -// */ -// protected Value parseQNameOrBoolean() -// throws IOException, RDFParseException -// { -// // First character should be a ':' or a letter -// int c = read(); -// if (c == -1) { -// throwEOFException(); -// } -// if (c != ':' && !TurtleUtil.isPrefixStartChar(c)) { -// reportError("Expected a ':' or a letter, found '" + (char)c + "'"); -// } +// /** +// * Parses qnames and boolean values, which have equivalent starting +// * characters. +// */ +// protected Value parseQNameOrBoolean() +// throws IOException, RDFParseException +// { +// // First character should be a ':' or a letter +// int c = read(); +// if (c == -1) { +// throwEOFException(); +// } +// if (c != ':' && !TurtleUtil.isPrefixStartChar(c)) { +// reportError("Expected a ':' or a letter, found '" + (char)c + "'"); +// } // -// String namespace = null; +// String namespace = null; // -// if (c == ':') { -// // qname using default namespace -// namespace = getNamespace(""); -// if (namespace == null) { -// reportError("Default namespace used but not defined"); -// } -// } -// else { -// // c is the first letter of the prefix -// StringBuilder prefix = new StringBuilder(8); -// prefix.append((char)c); +// if (c == ':') { +// // qname using default namespace +// namespace = getNamespace(""); +// if (namespace == null) { +// reportError("Default namespace used but not defined"); +// } +// } +// else { +// // c is the first letter of the prefix +// StringBuilder prefix = new StringBuilder(8); +// prefix.append((char)c); // -// c = read(); -// while (TurtleUtil.isPrefixChar(c)) { -// prefix.append((char)c); -// c = read(); -// } +// c = read(); +// while (TurtleUtil.isPrefixChar(c)) { +// prefix.append((char)c); +// c = read(); +// } // -// if (c != ':') { -// // prefix may actually be a boolean value -// String value = prefix.toString(); +// if (c != ':') { +// // prefix may actually be a boolean value 
+// String value = prefix.toString(); // -// if (value.equals("true") || value.equals("false")) { -// return createLiteral(value, null, XMLSchema.BOOLEAN); -// } -// } +// if (value.equals("true") || value.equals("false")) { +// return createLiteral(value, null, XMLSchema.BOOLEAN); +// } +// } // -// verifyCharacter(c, ":"); +// verifyCharacter(c, ":"); // -// namespace = getNamespace(prefix.toString()); -// if (namespace == null) { -// reportError("Namespace prefix '" + prefix.toString() + "' used but not defined"); -// } -// } +// namespace = getNamespace(prefix.toString()); +// if (namespace == null) { +// reportError("Namespace prefix '" + prefix.toString() + "' used but not defined"); +// } +// } // -// // c == ':', read optional local name -// StringBuilder localName = new StringBuilder(16); -// c = read(); -// if (TurtleUtil.isNameStartChar(c)) { -// localName.append((char)c); +// // c == ':', read optional local name +// StringBuilder localName = new StringBuilder(16); +// c = read(); +// if (TurtleUtil.isNameStartChar(c)) { +// localName.append((char)c); // -// c = read(); -// while (TurtleUtil.isNameChar(c)) { -// localName.append((char)c); -// c = read(); -// } -// } +// c = read(); +// while (TurtleUtil.isNameChar(c)) { +// localName.append((char)c); +// c = read(); +// } +// } // -// // Unread last character -// unread(c); +// // Unread last character +// unread(c); // -// // Note: namespace has already been resolved -// return createURI(namespace + localName.toString()); -// } +// // Note: namespace has already been resolved +// return createURI(namespace + localName.toString()); +// } // - /** - * Parses a blank node ID, e.g. <tt>_:node1</tt>. - */ - protected BNode parseNodeID() - throws IOException, RDFParseException - { - // Node ID should start with "_:" - verifyCharacter(read(), "_"); - verifyCharacter(read(), ":"); + /** + * Parses a blank node ID, e.g. <tt>_:node1</tt>. 
+ */ + protected BNode parseNodeID() + throws IOException, RDFParseException + { + // Node ID should start with "_:" + verifyCharacter(read(), "_"); + verifyCharacter(read(), ":"); - // Read the node ID - int c = read(); - if (c == -1) { - throwEOFException(); - } + // Read the node ID + int c = read(); + if (c == -1) { + throwEOFException(); + } // modified to allow fully numeric bnode ids -// else if (!TurtleUtil.isNameStartChar(c)) { -// reportError("Expected a letter, found '" + (char)c + "'"); -// } +// else if (!TurtleUtil.isNameStartChar(c)) { +// reportError("Expected a letter, found '" + (char)c + "'"); +// } - StringBuilder name = new StringBuilder(32); - name.append((char)c); + StringBuilder name = new StringBuilder(32); + name.append((char)c); - // Read all following letter and numbers, they are part of the name - c = read(); - while (TurtleUtil.isNameChar(c)) { - name.append((char)c); - c = read(); - } + // Read all following letter and numbers, they are part of the name + c = read(); + while (TurtleUtil.isNameChar(c)) { + name.append((char)c); + c = read(); + } - unread(c); + unread(c); - return createBNode(name.toString()); - } + return createBNode(name.toString()); + } // -// protected void reportStatement(Resource subj, URI pred, Value obj) -// throws RDFParseException, RDFHandlerException -// { -// Statement st = createStatement(subj, pred, obj); -// rdfHandler.handleStatement(st); -// } +// protected void reportStatement(Resource subj, URI pred, Value obj) +// throws RDFParseException, RDFHandlerException +// { +// Statement st = createStatement(subj, pred, obj); +// rdfHandler.handleStatement(st); +// } // -// /** -// * Verifies that the supplied character <tt>c</tt> is one of the expected -// * characters specified in <tt>expected</tt>. This method will throw a -// * <tt>ParseException</tt> if this is not the case. 
-// */ -// protected void verifyCharacter(int c, String expected) -// throws RDFParseException -// { -// if (c == -1) { -// throwEOFException(); -// } -// else if (expected.indexOf((char)c) == -1) { -// StringBuilder msg = new StringBuilder(32); -// msg.append("Expected "); -// for (int i = 0; i < expected.length(); i++) { -// if (i > 0) { -// msg.append(" or "); -// } -// msg.append('\''); -// msg.append(expected.charAt(i)); -// msg.append('\''); -// } -// msg.append(", found '"); -// msg.append((char)c); -// msg.append("'"); +// /** +// * Verifies that the supplied character <tt>c</tt> is one of the expected +// * characters specified in <tt>expected</tt>. This method will throw a +// * <tt>ParseException</tt> if this is not the case. +// */ +// protected void verifyCharacter(int c, String expected) +// throws RDFParseException +// { +// if (c == -1) { +// throwEOFException(); +// } +// else if (expected.indexOf((char)c) == -1) { +// StringBuilder msg = new StringBuilder(32); +// msg.append("Expected "); +// for (int i = 0; i < expected.length(); i++) { +// if (i > 0) { +// msg.append(" or "); +// } +// msg.append('\''); +// msg.append(expected.charAt(i)); +// msg.append('\''); +// } +// msg.append(", found '"); +// msg.append((char)c); +// msg.append("'"); // -// reportError(msg.toString()); -// } -// } +// reportError(msg.toString()); +// } +// } // - /** - * Consumes any white space characters (space, tab, line feed, newline) and - * comments (#-style) from <tt>reader</tt>. After this method has been - * called, the first character that is returned by <tt>reader</tt> is either - * a non-ignorable character, or EOF. For convenience, this character is also - * returned by this method. - * - * @return The next character that will be returned by <tt>reader</tt>. 
- */ - protected int skipWS() - throws IOException - { - int c = read(); - while (TurtleUtil.isWhitespace(c)) { - c = read(); - } + /** + * Consumes any white space characters (space, tab, line feed, newline) and + * comments (#-style) from <tt>reader</tt>. After this method has been + * called, the first character that is returned by <tt>reader</tt> is either + * a non-ignorable character, or EOF. For convenience, this character is also + * returned by this method. + * + * @return The next character that will be returned by <tt>reader</tt>. + */ + protected int skipWS() + throws IOException + { + int c = read(); + while (TurtleUtil.isWhitespace(c)) { + c = read(); + } - unread(c); + unread(c); - return c; - } + return c; + } // -// /** -// * Consumes characters from reader until the first EOL has been read. This -// * line of text is then passed to the {@link #rdfHandler} as a comment. -// */ -// protected void processComment() -// throws IOException, RDFHandlerException -// { -// StringBuilder comment = new StringBuilder(64); -// int c = read(); -// while (c != -1 && c != 0xD && c != 0xA) { -// comment.append((char)c); -// c = read(); -// } +// /** +// * Consumes characters from reader until the first EOL has been read. This +// * line of text is then passed to the {@link #rdfHandler} as a comment. +// */ +// protected void processComment() +// throws IOException, RDFHandlerException +// { +// StringBuilder comment = new StringBuilder(64); +// int c = read(); +// while (c != -1 && c != 0xD && c != 0xA) { +// comment.append((char)c); +// c = read(); +// } // -// // c is equal to -1, \r or \n. -// // In case c is equal to \r, we should also read a following \n. -// if (c == 0xD) { -// c = read(); +// // c is equal to -1, \r or \n. +// // In case c is equal to \r, we should also read a following \n. 
+// if (c == 0xD) { +// c = read(); // -// if (c != 0xA) { -// unread(c); -// } -// } -// rdfHandler.handleComment(comment.toString()); -// reportLocation(); -// } +// if (c != 0xA) { +// unread(c); +// } +// } +// rdfHandler.handleComment(comment.toString()); +// reportLocation(); +// } // -// protected int read() -// throws IOException -// { -// return reader.read(); -// } +// protected int read() +// throws IOException +// { +// return reader.read(); +// } // -// protected void unread(int c) -// throws IOException -// { -// if (c != -1) { -// reader.unread(c); -// } -// } +// protected void unread(int c) +// throws IOException +// { +// if (c != -1) { +// reader.unread(c); +// } +// } // -// protected int peek() -// throws IOException -// { -// int result = read(); -// unread(result); -// return result; -// } +// protected int peek() +// throws IOException +// { +// int result = read(); +// unread(result); +// return result; +// } // -// protected void reportLocation() { -// reportLocation(lineReader.getLineNumber(), -1); -// } +// protected void reportLocation() { +// reportLocation(lineReader.getLineNumber(), -1); +// } // -// /** -// * Overrides {@link RDFParserBase#reportWarning(String)}, adding line number -// * information to the error. -// */ -// @Override -// protected void reportWarning(String msg) { -// reportWarning(msg, lineReader.getLineNumber(), -1); -// } +// /** +// * Overrides {@link RDFParserBase#reportWarning(String)}, adding line number +// * information to the error. +// */ +// @Override +// protected void reportWarning(String msg) { +// reportWarning(msg, lineReader.getLineNumber(), -1); +// } // -// /** -// * Overrides {@link RDFParserBase#reportError(String)}, adding line number -// * information to the error. 
-// */ -// @Override -// protected void reportError(String msg) -// throws RDFParseException -// { -// reportError(msg, lineReader.getLineNumber(), -1); -// } +// /** +// * Overrides {@link RDFParserBase#reportError(String)}, adding line number +// * information to the error. +// */ +// @Override +// protected void reportError(String msg) +// throws RDFParseException +// { +// reportError(msg, lineReader.getLineNumber(), -1); +// } // -// /** -// * Overrides {@link RDFParserBase#reportFatalError(String)}, adding line -// * number information to the error. -// */ -// @Override -// protected void reportFatalError(String msg) -// throws RDFParseException -// { -// reportFatalError(msg, lineReader.getLineNumber(), -1); -// } +// /** +// * Overrides {@link RDFParserBase#reportFatalError(String)}, adding line +// * number information to the error. +// */ +// @Override +// protected void reportFatalError(String msg) +// throws RDFParseException +// { +// reportFatalError(msg, lineReader.getLineNumber(), -1); +// } // -// /** -// * Overrides {@link RDFParserBase#reportFatalError(Exception)}, adding line -// * number information to the error. -// */ -// @Override -// protected void reportFatalError(Exception e) -// throws RDFParseException -// { -// reportFatalError(e, lineReader.getLineNumber(), -1); -// } +// /** +// * Overrides {@link RDFParserBase#reportFatalError(Exception)}, adding line +// * number information to the error. +// */ +// @Override +// protected void reportFatalError(Exception e) +// throws RDFParseException +// { +// reportFatalError(e, lineReader.getLineNumber(), -1); +// } // -// protected void throwEOFException() -// throws RDFParseException -// { -// throw new RDFParseException("Unexpected end of file"); -// } +// protected void throwEOFException() +// throws RDFParseException +// { +// throw new RDFParseException("Unexpected end of file"); +// } ... [truncated message content] |
From: <tho...@us...> - 2014-02-20 00:02:06
|
Revision: 7852 http://sourceforge.net/p/bigdata/code/7852 Author: thompsonbry Date: 2014-02-20 00:02:00 +0000 (Thu, 20 Feb 2014) Log Message: ----------- javadoc Modified Paths: -------------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/FuzzySSSP.java Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/FuzzySSSP.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/FuzzySSSP.java 2014-02-19 20:36:35 UTC (rev 7851) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/FuzzySSSP.java 2014-02-20 00:02:00 UTC (rev 7852) @@ -63,12 +63,12 @@ * along the shortest path (which we get from the per-source SSSP traversals). * * TODO Support breaking out of the analytic as soon as the frontier is known to - * contain at least N distinct vertices. Note that for frontier implementations - * that allow duplicates, this means that you need to wait for the end of the - * iteration to make the decision. We already support a decision point at the - * end of each iteration. This would allow us to lift the decision point inside - * of the iteration and terminate processing eagerly when the frontier size - * exceeds a specified value. + * contain at least N(=2k) distinct vertices (or M=10k edges). Note that for + * frontier implementations that allow duplicates, this means that you need to + * wait for the end of the iteration to make the decision. We already support a + * decision point at the end of each iteration. This would allow us to lift the + * decision point inside of the iteration and terminate processing eagerly when + * the frontier size exceeds a specified value. * * TODO: Implement unit test with ground truth. * This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-02-19 20:36:37
|
Revision: 7851 http://sourceforge.net/p/bigdata/code/7851 Author: thompsonbry Date: 2014-02-19 20:36:35 +0000 (Wed, 19 Feb 2014) Log Message: ----------- updated the URLs to point at the new wiki location Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-02-19 01:47:28 UTC (rev 7850) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-02-19 20:36:35 UTC (rev 7851) @@ -10,10 +10,10 @@ <h2>Welcome to bigdata®.</h2> <p>Please consult the -<a href="https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=NanoSparqlServer" +<a href="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer" target="_blank" > documentation</a> for information on using the NanoSparqlServer's REST Api. </br>See the - <a href="https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page" + <a href="http://wiki.bigdata.com/wiki/index.php/Main_Page" target="_blank" >wiki</a> for help on query optimization, bigdata SPARQL extensions, etc. </p> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-02-19 01:47:37
|
Revision: 7850 http://bigdata.svn.sourceforge.net/bigdata/?rev=7850&view=rev Author: tobycraig Date: 2014-02-19 01:47:28 +0000 (Wed, 19 Feb 2014) Log Message: ----------- Initial new layout with most load functionality Modified Paths: -------------- branches/RDR/bigdata-war/src/html/workbench.js Added Paths: ----------- branches/RDR/bigdata-war/src/html/new.html branches/RDR/bigdata-war/src/html/style.css Added: branches/RDR/bigdata-war/src/html/new.html =================================================================== --- branches/RDR/bigdata-war/src/html/new.html (rev 0) +++ branches/RDR/bigdata-war/src/html/new.html 2014-02-19 01:47:28 UTC (rev 7850) @@ -0,0 +1,69 @@ +<!DOCTYPE html> +<html lang="en"> + <head> + <meta charset="utf-8"> + <title>RedPoint Workbench</title> + <link rel="stylesheet" href="style.css"> + </head> + + <body> + <div id="tab-selector"> + <a data-target="load" class="active">Load</a> + <a data-target="query">Query</a> + <a data-target="explore">Explore</a> + <a data-target="status">Status</a> + <a data-target="namespaces">Namespaces</a> + </div> + + <div id="current-namespace"> + Current namespace: <span>kb</span> + </div> + + <div class="tab" id="load"> + <input id="load-file" type="file" name="file"> + <input id="load-hidden" type="hidden" name="large-file-contents"> + <p id="large-file-message" style="display: none;">Your file is too large to display here, but will be uploaded as normal.</p> + <br> + <textarea id="load-box" name="textarea" rows="10" cols="80"></textarea> + <br> + Type: <select id="load-type"> + <option value="sparql" selected="selected">SPARQL</option> + <option value="rdf">RDF</option> + <option value="path">File path</option> + </select> + <select id="rdf-type" style="display: none;"> + <option value="">Select RDF format</option> + <option value="n-quads">N-Quads</option> + <option value="n-triples">N-Triples</option> + <option value="n3">Notation3</option> + <option value="rdf/xml">RDF/XML</option> + <option 
value="trig">TriG</option> + <option value="trix">TriX</option> + <option value="turtle">Turtle</option> + </select> + <br> + <button>Load</button> + <hr> + <p>Response:</p> + <pre id="response"></pre> + + </div> + + <div class="tab" id="query">query + </div> + + <div class="tab" id="explore">explore + </div> + + <div class="tab" id="status">status + </div> + + <div class="tab" id="namespaces">namespaces + </div> + + <!--[if IE]><script src="//html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> + <script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script> + <script>window.jQuery || document.write('<script src="/jquery.min.js"><\/script>')</script> + <script src="workbench.js"></script> + </body> +</html> Property changes on: branches/RDR/bigdata-war/src/html/new.html ___________________________________________________________________ Added: svn:mime-type + text/plain Added: svn:keywords + Id Date Revision Author HeadURL Added: branches/RDR/bigdata-war/src/html/style.css =================================================================== --- branches/RDR/bigdata-war/src/html/style.css (rev 0) +++ branches/RDR/bigdata-war/src/html/style.css 2014-02-19 01:47:28 UTC (rev 7850) @@ -0,0 +1,84 @@ +/* http://meyerweb.com/eric/tools/css/reset/ + v2.0 | 20110126 + License: none (public domain) +*/ + +html, body, div, span, applet, object, iframe, +h1, h2, h3, h4, h5, h6, p, blockquote, pre, +a, abbr, acronym, address, big, cite, code, +del, dfn, em, img, ins, kbd, q, s, samp, +small, strike, strong, sub, sup, tt, var, +b, u, i, center, +dl, dt, dd, ol, ul, li, +fieldset, form, label, legend, +table, caption, tbody, tfoot, thead, tr, th, td, +article, aside, canvas, details, embed, +figure, figcaption, footer, header, hgroup, +menu, nav, output, ruby, section, summary, +time, mark, audio, video { + margin: 0; + padding: 0; + border: 0; + font-size: 100%; + font: inherit; + vertical-align: baseline; +} +/* HTML5 display-role reset for 
older browsers */ +article, aside, details, figcaption, figure, +footer, header, hgroup, menu, nav, section { + display: block; +} +body { + line-height: 1; +} +ol, ul { + list-style: none; +} +blockquote, q { + quotes: none; +} +blockquote:before, blockquote:after, +q:before, q:after { + content: ''; + content: none; +} +table { + border-collapse: collapse; + border-spacing: 0; +} + + +/* Workbench */ + +body { + margin: 10px; +} + +#tab-selector a { + padding: 10px; + border: 1px solid; + border-right: none; + border-bottom: none; + display: inline-block; + float: left; +} + +#tab-selector a:last-of-type { + border-right: 1px solid; +} + +#current-namespace { + float: right; +} + +.active { + text-shadow: 1px 0px 0px black; + /*font-weight: bold;*/ +} + +.tab { + display: none; + clear: both; + border: 1px solid; + padding: 10px; +} Modified: branches/RDR/bigdata-war/src/html/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/workbench.js 2014-02-17 19:20:06 UTC (rev 7849) +++ branches/RDR/bigdata-war/src/html/workbench.js 2014-02-19 01:47:28 UTC (rev 7850) @@ -1,5 +1,26 @@ -/* Multi purpose data entry */ +$(function() { +$('#tab-selector a').click(function() { + $('.tab').hide(); + $('#' + $(this).data('target')).show(); + $('#tab-selector a').removeClass(); + $(this).addClass('active'); +}); + +$('#tab-selector a:first').click(); + +// TODO: set namespace initially + +/* Namespaces */ + +function getNamespaces() { + $.get('/namespace', function(data) { + + }); +} + +/* Load */ + function handleDragOver(e) { e.stopPropagation(); e.preventDefault(); @@ -26,22 +47,22 @@ // if file is too large, tell user to supply local path if(f.size > 100 * 1048576) { alert('File too large, enter local path to file'); - $('#mp-box').val('/path/to/' + f.name); + $('#load-box').val('/path/to/' + f.name); setType('path'); - $('#mp-file').val(''); + $('#load-file').val(''); $('#large-file-message').hide(); return; } 
// if file is small enough, populate the textarea with it if(f.size < 10 * 1024) { - holder = '#mp-box'; - $('#mp-hidden').val(''); + holder = '#load-box'; + $('#load-hidden').val(''); $('#large-file-message').hide(); } else { // store file contents in hidden input and clear textarea - holder = '#mp-hidden'; - $('#mp-box').val(''); + holder = '#load-hidden'; + $('#load-box').val(''); $('#large-file-message').show(); } var fr = new FileReader(); @@ -50,7 +71,7 @@ guessType(f.name.split('.').pop().toLowerCase(), e2.target.result); }; fr.readAsText(f); - $('#mp-file').val(''); + $('#load-file').val(''); } function guessType(extension, content) { @@ -100,7 +121,7 @@ } function setType(type, format) { - $('#mp-type').val(type); + $('#load-type').val(type); if(type == 'rdf') { $('#rdf-type').show(); $('#rdf-type').val(format); @@ -133,15 +154,15 @@ var sparql_update_commands = ['INSERT', 'DELETE']; // stores the id of the element that contains the data to be sent -var holder = '#mp-box'; +var holder = '#load-box'; -$('#mp-file').change(handleFile); -$('#mp-box').on('dragover', handleDragOver); -$('#mp-box').on('drop', handleFile); -$('#mp-box').on('paste', handlePaste); -$('#mp-type').change(handleTypeChange); +$('#load-file').change(handleFile); +$('#load-box').on('dragover', handleDragOver); +$('#load-box').on('drop', handleFile); +$('#load-box').on('paste', handlePaste); +$('#load-type').change(handleTypeChange); -$('#mp-send').click(function() { +$('#load button').click(function() { // determine action based on type var settings = { type: 'POST', @@ -149,7 +170,7 @@ success: updateResponseXML, error: updateResponseError } - switch($('#mp-type').val()) { + switch($('#load-type').val()) { case 'sparql': settings.data = 'update=' + encodeURI(settings.data); settings.success = updateResponseHTML; @@ -170,11 +191,11 @@ $.ajax('/sparql', settings); }); -function updateResponseHTML(data, textStatus, jqXHR) { +function updateResponseHTML(data) { $('#response').html(data); } 
-function updateResponseXML(data, textStatus, jqXHR) { +function updateResponseXML(data) { var modified = data.childNodes[0].attributes['modified'].value; var milliseconds = data.childNodes[0].attributes['milliseconds'].value; $('#response').text('Modified: ' + modified + '\nMilliseconds: ' + milliseconds); @@ -236,7 +257,7 @@ $.ajax('/sparql', settings); } -function updateNavigationStart(data, textStatus, jqXHR) { +function updateNavigationStart(data) { var disp = $('#navigator-display'); disp.html(''); // see if we got any results @@ -295,3 +316,4 @@ $('#navigator-display').html('Error! ' + textStatus + ' ' + errorThrown); } +}); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-02-17 19:20:18
|
Revision: 7849 http://bigdata.svn.sourceforge.net/bigdata/?rev=7849&view=rev Author: tobycraig Date: 2014-02-17 19:20:06 +0000 (Mon, 17 Feb 2014) Log Message: ----------- Added initial navigator functionality, allowing vertex exploration. Modified Paths: -------------- branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/workbench.js Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-02-17 15:40:41 UTC (rev 7848) +++ branches/RDR/bigdata-war/src/html/index.html 2014-02-17 19:20:06 UTC (rev 7849) @@ -5,6 +5,11 @@ <meta http-equiv="Content-Type" content="text/html;charset=utf-8" > <title>bigdata® NanoSparqlServer</title> <!-- $Id$ --> +<style> +td { + border: 1px solid; +} +</style> </head> <body> @@ -174,6 +179,15 @@ Response: <pre id="response"></pre> +<h2>Navigator</h2> +Enter a URI to begin navigation +<br> +<form id="navigator"> +<input type="text" id="navigator-uri"> +<input type="submit"> +</form> +<div id="navigator-display"></div> + <script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script> <script>window.jQuery || document.write('<script src="/jquery.min.js"><\/script>')</script> <script src="/workbench.js"></script> Modified: branches/RDR/bigdata-war/src/html/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/workbench.js 2014-02-17 15:40:41 UTC (rev 7848) +++ branches/RDR/bigdata-war/src/html/workbench.js 2014-02-17 19:20:06 UTC (rev 7849) @@ -1,3 +1,5 @@ +/* Multi purpose data entry */ + function handleDragOver(e) { e.stopPropagation(); e.preventDefault(); @@ -181,3 +183,115 @@ function updateResponseError(jqXHR, textStatus, errorThrown) { $('#response').text('Error! 
' + textStatus + ' ' + errorThrown); } + + +/* Navigator */ + +$('#navigator').submit(function() { + // get URI + var uri = $('#navigator-uri').val(); + if(uri) { + loadURI(uri); + } + return false; +}); + +function loadURI(uri) { + // send query to server + var query = 'select * \ + where { \ + bind (<URI> as ?vertex) . \ + { \ + bind (<<?vertex ?p ?o>> as ?sid) . \ + optional \ + { \ + { \ + ?sid ?sidP ?sidO . \ + } union { \ + ?sidS ?sidP ?sid . \ + } \ + } \ + } union { \ + bind (<<?s ?p ?vertex>> as ?sid) . \ + optional \ + { \ + { \ + ?sid ?sidP ?sidO . \ + } union { \ + ?sidS ?sidP ?sid . \ + } \ + } \ + } \ + }'; + + query = query.replace('URI', uri); + var settings = { + type: 'POST', + data: 'query=' + encodeURI(query), + dataType: 'json', + accepts: {'json': 'application/sparql-results+json'}, + success: updateNavigationStart, + error: updateNavigationError + }; + $.ajax('/sparql', settings); +} + +function updateNavigationStart(data, textStatus, jqXHR) { + var disp = $('#navigator-display'); + disp.html(''); + // see if we got any results + if(data.results.bindings.length == 0) { + disp.append('No vertex found!'); + return; + } + + var vertex = data.results.bindings[0].vertex; + disp.append('<h3>' + vertex.value + '</h3>'); + var outbound=[], inbound=[], attributes=[]; + for(var i=0; i<data.results.bindings.length; i++) { + var binding = data.results.bindings[i]; + // TODO: are attributes always on outbound relationships? 
+ if('o' in binding) { + if(binding.o.type == 'uri') { + outbound.push(binding); + } else { + attributes.push(binding); + } + } else { + inbound.push(binding); + } + } + + if(outbound.length) { + disp.append('<h4>Outbound links</h4>'); + var table = $('<table>').appendTo(disp); + for(var i=0; i<outbound.length; i++) { + var linkAttributes = outbound[i].sidP.value + ': ' + outbound[i].sidO.value; + table.append('<tr><td>' + outbound[i].p.value + '</td><td><a href="#">' + outbound[i].o.value + '</a></td><td>' + linkAttributes + '</td></tr>'); + } + } + + if(inbound.length) { + disp.append('<h4>Inbound links</h4>'); + var table = $('<table>').appendTo(disp); + for(var i=0; i<inbound.length; i++) { + var linkAttributes = inbound[i].sidP.value + ': ' + inbound[i].sidO.value; + table.append('<tr><td><a href="#">' + inbound[i].s.value + '</a></td><td>' + inbound[i].p.value + '</td><td>' + linkAttributes + '</td></tr>'); + } + } + + if(attributes.length) { + disp.append('<h4>Attributes</h4>'); + var table = $('<table>').appendTo(disp); + for(var i=0; i<attributes.length; i++) { + table.append('<tr><td>' + attributes[i].p.value + '</td><td>' + attributes[i].o.value + '</td></tr>'); + } + } + + disp.find('a').click(function() { loadURI(this.text); return false; }); +} + +function updateNavigationError(jqXHR, textStatus, errorThrown) { + $('#navigator-display').html('Error! ' + textStatus + ' ' + errorThrown); +} + This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-02-17 15:40:50
|
Revision: 7848 http://bigdata.svn.sourceforge.net/bigdata/?rev=7848&view=rev Author: thompsonbry Date: 2014-02-17 15:40:41 +0000 (Mon, 17 Feb 2014) Log Message: ----------- license file header. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java 2014-02-13 23:42:11 UTC (rev 7847) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java 2014-02-17 15:40:41 UTC (rev 7848) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; import java.util.Random; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-02-13 23:42:18
|
Revision: 7847 http://bigdata.svn.sourceforge.net/bigdata/?rev=7847&view=rev Author: tobycraig Date: 2014-02-13 23:42:11 +0000 (Thu, 13 Feb 2014) Log Message: ----------- SPARQL updates, RDF and file paths can all be sent to the server and added to the database. Modified Paths: -------------- branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/workbench.js Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-02-12 00:58:18 UTC (rev 7846) +++ branches/RDR/bigdata-war/src/html/index.html 2014-02-13 23:42:11 UTC (rev 7847) @@ -145,10 +145,12 @@ --> <h2>Multi-purpose textarea</h2> -<form method="post" id="mp-form" enctype="multipart/form-data"> <input id="mp-file" type="file" name="file"> +<br> <input id="mp-hidden" type="hidden" name="large-file-contents"> -<textarea id="mp-box" name="textarea"></textarea> +<p id="large-file-message" style="display: none;">Your file is too large to display here, but will be uploaded as normal.</p> +<textarea id="mp-box" name="textarea" rows="10" cols="80"></textarea> +<br> <select id="mp-type"> <option value="sparql" selected="selected">SPARQL</option> <option value="rdf">RDF</option> @@ -164,8 +166,13 @@ <option value="trix">TriX</option> <option value="turtle">Turtle</option> </select> -<input type="submit"> -</form> +<br> +Tenant Namespace <input type="text" name="namespace" title="Tenant namespace."> (leave empty for default KB) +<br> +<button type="button" id="mp-send">Send</button> +<br> +Response: +<pre id="response"></pre> <script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script> <script>window.jQuery || document.write('<script src="/jquery.min.js"><\/script>')</script> Modified: branches/RDR/bigdata-war/src/html/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/workbench.js 2014-02-12 
00:58:18 UTC (rev 7846) +++ branches/RDR/bigdata-war/src/html/workbench.js 2014-02-13 23:42:11 UTC (rev 7847) @@ -27,18 +27,20 @@ $('#mp-box').val('/path/to/' + f.name); setType('path'); $('#mp-file').val(''); + $('#large-file-message').hide(); return; } // if file is small enough, populate the textarea with it - var holder; if(f.size < 10 * 1024) { holder = '#mp-box'; $('#mp-hidden').val(''); + $('#large-file-message').hide(); } else { // store file contents in hidden input and clear textarea holder = '#mp-hidden'; $('#mp-box').val(''); + $('#large-file-message').show(); } var fr = new FileReader(); fr.onload = function(e2) { @@ -59,24 +61,36 @@ setType('rdf', rdf_types[extension]); } else { // extension is no help, see if we can find some SPARQL commands - content = content.toUpperCase(); - for(var i=0, found=false; i<sparql_update_commands.length; i++) { - if(content.indexOf(sparql_update_commands[i]) != -1) { - setType('sparql'); - found = true; - break; - } + setType(identify(content)); + } +} + +function identify(text, considerPath) { + text = text.toUpperCase(); + + if(considerPath) { + // match Unix or Windows paths + var re = /^(((\/[^\/]+)+)|([A-Z]:([\\\/][^\\\/]+)+))$/; + if(re.test(text.trim())) { + return 'path'; } - if(!found) { - setType('rdf', ''); + } + + for(var i=0; i<sparql_update_commands.length; i++) { + if(text.indexOf(sparql_update_commands[i]) != -1) { + return 'sparql'; } } + + return 'rdf'; } -function handlePaste(e) { - alert('pasted!'); - e.stopPropagation(); - e.preventDefault(); +function handlePaste(e) { + // if the input is currently empty, try to identify the pasted content + var that = this; + if(this.value == '') { + setTimeout(function() { setType(identify(that.value, true)); }, 10); + } } function handleTypeChange(e) { @@ -106,8 +120,18 @@ 'trix': 'trix', //'xml': 'trix', 'ttl': 'turtle'}; + +var rdf_content_types = {'n-quads': 'application/n-quads', + 'n-triples': 'text/plain', + 'n3': 'text/n3', + 'rdf/xml': 
'application/rdf+xml', + 'trig': 'application/trig', + 'trix': 'application/trix', + 'turtle': 'text/turtle'}; var sparql_update_commands = ['INSERT', 'DELETE']; +// stores the id of the element that contains the data to be sent +var holder = '#mp-box'; $('#mp-file').change(handleFile); $('#mp-box').on('dragover', handleDragOver); @@ -115,7 +139,45 @@ $('#mp-box').on('paste', handlePaste); $('#mp-type').change(handleTypeChange); -$('#mp-form').submit(function() { +$('#mp-send').click(function() { // determine action based on type - + var settings = { + type: 'POST', + data: $(holder).val(), + success: updateResponseXML, + error: updateResponseError + } + switch($('#mp-type').val()) { + case 'sparql': + settings.data = 'update=' + encodeURI(settings.data); + settings.success = updateResponseHTML; + break; + case 'rdf': + var type = $('#rdf-type').val(); + if(!type) { + alert('Please select an RDF content type.'); + return; + } + settings.contentType = rdf_content_types[type]; + break; + case 'path': + settings.data = 'uri=file://' + encodeURI(settings.data); + break; + } + + $.ajax('/sparql', settings); }); + +function updateResponseHTML(data, textStatus, jqXHR) { + $('#response').html(data); +} + +function updateResponseXML(data, textStatus, jqXHR) { + var modified = data.childNodes[0].attributes['modified'].value; + var milliseconds = data.childNodes[0].attributes['milliseconds'].value; + $('#response').text('Modified: ' + modified + '\nMilliseconds: ' + milliseconds); +} + +function updateResponseError(jqXHR, textStatus, errorThrown) { + $('#response').text('Error! ' + textStatus + ' ' + errorThrown); +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-02-12 00:58:28
|
Revision: 7846 http://bigdata.svn.sourceforge.net/bigdata/?rev=7846&view=rev Author: tobycraig Date: 2014-02-12 00:58:18 +0000 (Wed, 12 Feb 2014) Log Message: ----------- Fixed drag & drop in Firefox by adding dragover handler Modified Paths: -------------- branches/RDR/bigdata-war/src/html/workbench.js Modified: branches/RDR/bigdata-war/src/html/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/workbench.js 2014-02-11 23:29:39 UTC (rev 7845) +++ branches/RDR/bigdata-war/src/html/workbench.js 2014-02-12 00:58:18 UTC (rev 7846) @@ -1,3 +1,9 @@ +function handleDragOver(e) { + e.stopPropagation(); + e.preventDefault(); + e.originalEvent.dataTransfer.dropEffect = 'copy'; +} + function handleFile(e) { e.stopPropagation(); e.preventDefault(); @@ -104,6 +110,7 @@ var sparql_update_commands = ['INSERT', 'DELETE']; $('#mp-file').change(handleFile); +$('#mp-box').on('dragover', handleDragOver); $('#mp-box').on('drop', handleFile); $('#mp-box').on('paste', handlePaste); $('#mp-type').change(handleTypeChange); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-02-11 23:29:46
|
Revision: 7845 http://bigdata.svn.sourceforge.net/bigdata/?rev=7845&view=rev Author: tobycraig Date: 2014-02-11 23:29:39 +0000 (Tue, 11 Feb 2014) Log Message: ----------- Added form at bottom of index page to accept file selection or drag/drops, and detect file type. Modified Paths: -------------- branches/RDR/bigdata-war/src/html/index.html Added Paths: ----------- branches/RDR/bigdata-war/src/html/jquery.min.js branches/RDR/bigdata-war/src/html/workbench.js Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-02-05 20:10:41 UTC (rev 7844) +++ branches/RDR/bigdata-war/src/html/index.html 2014-02-11 23:29:39 UTC (rev 7845) @@ -39,6 +39,13 @@ which this page was accessed. </p> +<h2>Linked Data Navigation</h2> +<form action="navigate" method="get"> + <p>Enter a URI to navigate to <input type="text" name="uri"></p> + <p>Tenant Namespace <input type="text" name="namespace" title="Tenant namespace."> (leave empty for default KB)</p> + <input type="submit" value="Send" title="Submit query."> +</form> + <!-- Note: Some applications (firefox 7) can not handle a GET with a very long URL. For that reason ONLY this operation defaults to a POST. You SHOULD use GET for database queries since they are, by and large, idempotent. 
@@ -136,5 +143,32 @@ </p> </form> --> + +<h2>Multi-purpose textarea</h2> +<form method="post" id="mp-form" enctype="multipart/form-data"> +<input id="mp-file" type="file" name="file"> +<input id="mp-hidden" type="hidden" name="large-file-contents"> +<textarea id="mp-box" name="textarea"></textarea> +<select id="mp-type"> +<option value="sparql" selected="selected">SPARQL</option> +<option value="rdf">RDF</option> +<option value="path">File path</option> +</select> +<select id="rdf-type" style="display: none;"> +<option value="">Select RDF format</option> +<option value="n-quads">N-Quads</option> +<option value="n-triples">N-Triples</option> +<option value="n3">Notation3</option> +<option value="rdf/xml">RDF/XML</option> +<option value="trig">TriG</option> +<option value="trix">TriX</option> +<option value="turtle">Turtle</option> +</select> +<input type="submit"> +</form> + +<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script> +<script>window.jQuery || document.write('<script src="/jquery.min.js"><\/script>')</script> +<script src="/workbench.js"></script> </body> </html> \ No newline at end of file Added: branches/RDR/bigdata-war/src/html/jquery.min.js =================================================================== Added: branches/RDR/bigdata-war/src/html/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/workbench.js (rev 0) +++ branches/RDR/bigdata-war/src/html/workbench.js 2014-02-11 23:29:39 UTC (rev 7845) @@ -0,0 +1,114 @@ +function handleFile(e) { + e.stopPropagation(); + e.preventDefault(); + + if(e.type == 'drop') { + var files = e.originalEvent.dataTransfer.files; + } else { + var files = e.originalEvent.target.files; + } + + // only one file supported + if(files.length > 1) { + alert('Ignoring all but first file'); + } + + var f = files[0]; + + // if file is too large, tell user to supply local path + if(f.size > 100 * 1048576) { + alert('File too 
large, enter local path to file'); + $('#mp-box').val('/path/to/' + f.name); + setType('path'); + $('#mp-file').val(''); + return; + } + + // if file is small enough, populate the textarea with it + var holder; + if(f.size < 10 * 1024) { + holder = '#mp-box'; + $('#mp-hidden').val(''); + } else { + // store file contents in hidden input and clear textarea + holder = '#mp-hidden'; + $('#mp-box').val(''); + } + var fr = new FileReader(); + fr.onload = function(e2) { + $(holder).val(e2.target.result); + guessType(f.name.split('.').pop().toLowerCase(), e2.target.result); + }; + fr.readAsText(f); + $('#mp-file').val(''); +} + +function guessType(extension, content) { + // try to guess type + if(extension == 'rq') { + // SPARQL + setType('sparql'); + } else if(extension in rdf_types) { + // RDF + setType('rdf', rdf_types[extension]); + } else { + // extension is no help, see if we can find some SPARQL commands + content = content.toUpperCase(); + for(var i=0, found=false; i<sparql_update_commands.length; i++) { + if(content.indexOf(sparql_update_commands[i]) != -1) { + setType('sparql'); + found = true; + break; + } + } + if(!found) { + setType('rdf', ''); + } + } +} + +function handlePaste(e) { + alert('pasted!'); + e.stopPropagation(); + e.preventDefault(); +} + +function handleTypeChange(e) { + $('#rdf-type').toggle($(this).val() == 'rdf'); +} + +function setType(type, format) { + $('#mp-type').val(type); + if(type == 'rdf') { + $('#rdf-type').show(); + $('#rdf-type').val(format); + } else { + $('#rdf-type').hide(); + } +} + +// .xml is used for both RDF and TriX, assume it's RDF +// We could check the parent element to see which it is +var rdf_types = {'nq': 'n-quads', + 'nt': 'n-triples', + 'n3': 'n3', + 'rdf': 'rdf/xml', + 'rdfs': 'rdf/xml', + 'owl': 'rdf/xml', + 'xml': 'rdf/xml', + 'trig': 'trig', + 'trix': 'trix', + //'xml': 'trix', + 'ttl': 'turtle'}; + +var sparql_update_commands = ['INSERT', 'DELETE']; + +$('#mp-file').change(handleFile); 
+$('#mp-box').on('drop', handleFile); +$('#mp-box').on('paste', handlePaste); +$('#mp-type').change(handleTypeChange); + +$('#mp-form').submit(function() { + // determine action based on type + +}); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-02-05 20:10:49
|
Revision: 7844 http://bigdata.svn.sourceforge.net/bigdata/?rev=7844&view=rev Author: thompsonbry Date: 2014-02-05 20:10:41 +0000 (Wed, 05 Feb 2014) Log Message: ----------- removed junit import from main code. Modified Paths: -------------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2014-02-05 12:07:09 UTC (rev 7843) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2014-02-05 20:10:41 UTC (rev 7844) @@ -40,8 +40,6 @@ import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; -import junit.framework.AssertionFailedError; - import org.apache.log4j.Logger; import com.bigdata.bop.BOp; @@ -1055,7 +1053,7 @@ // The cutoff limit. This annotation MUST exist on the JOIN. if (limit != ((Long) joinOp.getRequiredProperty(JoinAnnotations.LIMIT)) .intValue()) - throw new AssertionFailedError(); + throw new AssertionError(); final int joinId = joinOp.getId(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-02-03 12:57:45
|
Revision: 7839 http://bigdata.svn.sourceforge.net/bigdata/?rev=7839&view=rev Author: thompsonbry Date: 2014-02-03 12:57:35 +0000 (Mon, 03 Feb 2014) Log Message: ----------- Added a "Fuzzy SSSP" algorithm. This is not quite finished. It does not actually extract the shortest paths. The SSSP algorithm currently labels the vertices with the minimum distance rather than the predecessor. I am going to talk with Zhisong about how to best captured the predecessor, whether to capture both, and what is involved in supporting a push-style scatter operation. The FuzzySSSP does show how to break out of the BFS if the visited set size exceeds some threashold at the end of a round. You need to look at the set of active vertices, not the frontier. The frontier is just the set of vertices to be visited in that round. The set of active vertices is all vertices that have been visited to date (all vertices for which state has been materialized). Modified Paths: -------------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASState.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestBFS.java Added Paths: ----------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/FuzzySSSP.java Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java 2014-01-31 18:37:34 UTC (rev 7838) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java 2014-02-03 12:57:35 UTC (rev 7839) @@ -92,7 +92,7 @@ /** * Get the state for the edge using the appropriate factory. 
If this is the - * first visit for that edge, then the state is initialized using the + * first visit for that vertex, then the state is initialized using the * factory. Otherwise the existing state is returned. * * @param v @@ -105,6 +105,18 @@ ES getState(Statement e); /** + * Return <code>true</code> iff the specified vertex has an associated + * vertex state object - this is interpreted as meaning that the vertex has + * been "visited". + * + * @param v + * The vertex. + * @return <code>true</code> iff there is vertex state associated with that + * vertex. + */ + boolean isVisited(Value v); + + /** * The current frontier. */ IStaticFrontier frontier(); @@ -243,5 +255,5 @@ * Another vertex. */ int compareTo(Value u, Value v); - + } Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java 2014-01-31 18:37:34 UTC (rev 7838) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java 2014-02-03 12:57:35 UTC (rev 7839) @@ -260,7 +260,7 @@ } /** - * Reduce the active vertex stat, returning a histogram reporting the #of + * Reduce the active vertex state, returning a histogram reporting the #of * vertices at each distance from the starting vertex. There will always be * one vertex at depth zero - this is the starting vertex. For each * successive depth, the #of vertices that were labeled at that depth is Added: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/FuzzySSSP.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/FuzzySSSP.java (rev 0) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/FuzzySSSP.java 2014-02-03 12:57:35 UTC (rev 7839) @@ -0,0 +1,420 @@ +/** + Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package com.bigdata.rdf.graph.analytics; + +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import org.openrdf.model.Value; + +import com.bigdata.rdf.graph.IGASContext; +import com.bigdata.rdf.graph.IGASEngine; +import com.bigdata.rdf.graph.IGASState; +import com.bigdata.rdf.graph.IGraphAccessor; +import com.bigdata.rdf.graph.IStaticFrontier; +import com.bigdata.rdf.graph.analytics.FuzzySSSP.FuzzySSSPResult; +import com.bigdata.rdf.graph.impl.bd.BigdataGraphFixture; + +/** + * This algorithm provides a fuzzy implementation of the shortest paths between + * a set of source vertices and a set of target vertices. This can be used to + * identify a set of vertices that are close to the shortest paths between those + * source and target vertices. For some domains, the resulting set of vertices + * can be understood as an "interesting subgraph". + * <p> + * Problem: We want to find a set of not more than N vertices out of a data set + * that are "close" to the shortest path between two sets of vertices. + * <p> + * Approach: We want to find the set of SP (Shortest Path) vertices that lie + * along the shortest path between each source vertex and each target vertex. 
We + * would also like to know whether a source is connected to each target. To do + * this, we do NSOURCES SSSP traversals. For each traversal, we note the depth + * of each target from each source, and mark the depth as -1 if the target was + * not reachable from that source. The vertices along the shortest path to the + * target are collected. The sets of collected vertices are merged and + * duplicates are removed. + * <p> + * Finally, we do a BFS starting with all of the vertices in that merged + * collection and stopping when we have N vertices, including those along the + * shortest paths. This grows the initial set of vertices that lie along the + * shortest paths into a broader collection of vertices that are close to that + * shortest path. + * <p> + * Outputs: The N vertices, their distances from the shortest paths (which we + * get out of the final BFS), and the distance of each target from each source + * along the shortest path (which we get from the per-source SSSP traversals). + * + * TODO Support breaking out of the analytic as soon as the frontier is known to + * contain at least N distinct vertices. Note that for frontier implementations + * that allow duplicates, this means that you need to wait for the end of the + * iteration to make the decision. We already support a decision point at the + * end of each iteration. This would allow us to lift the decision point inside + * of the iteration and terminate processing eagerly when the frontier size + * exceeds a specified value. + * + * TODO: Implement unit test with ground truth. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class FuzzySSSP implements Callable<FuzzySSSPResult>{ + + /** + * The source vertices (there must be at least one). + */ + private final Value[] src; + /** + * The target vertices (there must be at least one). + */ + private final Value[] tgt; + /** + * The maximum number of vertices to report (stopping criteria for the BFS + * expansion). 
+ */ + private final int N; + + /** + * The {@link IGASEngine} used to run the analytics. + */ + private final IGASEngine gasEngine; + + /** + * The object used to access the graph. + */ + private final IGraphAccessor graphAccessor; + + /** + * Interface for communicating the results back to the caller. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + public class FuzzySSSPResult { + + /** + * The reachability map. The keys of the outer map are the source + * vertices. The values in the inner maps are the target vertices that + * are reachable from a given source vertex (both the key and the value + * of the inner map is the target vertex - it is being used as a set). + */ + private ConcurrentMap<Value, ConcurrentMap<Value, Value>> reachable = new ConcurrentHashMap<Value, ConcurrentMap<Value, Value>>(); + + /** + * The set of visited vertices. + */ + private Set<Value> visited = new LinkedHashSet<Value>(); + +// private Map<Value,Set<Value>> + private boolean addVisited(final Value v) { + + return visited.add(v); + + } + + /** + * Assert that the target was reachable from the source. + * + * @param src + * The source. + * @param tgt + * The target. + */ + private void addReachable(final Value src, final Value tgt) { + + if (src == null) + throw new IllegalArgumentException(); + + if (tgt == null) + throw new IllegalArgumentException(); + + ConcurrentMap<Value, Value> tmp = reachable.get(src); + + if (tmp == null) { + + final ConcurrentMap<Value, Value> old = reachable.putIfAbsent( + src, tmp = new ConcurrentHashMap<Value, Value>()); + + if (old != null) { + + // Lost the data race. + tmp = old; + + } + + } + + // add target to the reachability set for that source. + tmp.putIfAbsent(tgt, tgt); + + } + + /** + * Return the number of visited vertices. + */ + public int getVisitedCount() { + + return visited.size(); + + } + + /** + * Return <code>true</code> if the given target is reachable by the + * given source. 
+ * + * @param src + * The source. + * @param tgt + * The target. + * @return <code>true</code> iff the target is reachable from that + * source. + */ + public boolean getReachable(Value src, Value tgt) { + throw new UnsupportedOperationException(); + } + + /** + * Return the set of vertices that were discovered by the analytic. This + * constitutes an "interesting subgraph". The source and target vertices + * will be included in this collection. Each vertex along a shortest + * path from a source vertex to each of the target vertices will be + * included. Finally, each vertex in the BFS expension of those vertices + * will be included up to the maximum specified when the analytic was + * run. + */ + public Set<Value> getVisitedVertices() { + throw new UnsupportedOperationException(); + } + + /** + * TODO Also show the reachability matrix and perhaps the visited + * vertices in level sets. + */ + @Override + public String toString() { + + return getClass().getName() + "{nvisited=" + visited.size() + "}"; + + } + + } // class FuzzySSSPResult + + /** + * + * @param src + * The source vertices (there must be at least one). + * @param tgt + * The target vertices (there must be at least one). + * @param N + * The maximum number of vertices to report (must be positive), + * i.e., the stopping criteria for the BFS expansion. + * @param gasEngine + * The {@link IGASEngine} will be used to execute the analytic. + * @param graphAccessor + * The object used to access the graph. 
+ */ + public FuzzySSSP(final Value[] src, final Value[] tgt, final int N, + final IGASEngine gasEngine, final IGraphAccessor graphAccessor) { + + if (src == null) + throw new IllegalArgumentException(); + if (src.length == 0) + throw new IllegalArgumentException(); + for (Value v : src) + if (v == null) + throw new IllegalArgumentException(); + if (tgt == null) + throw new IllegalArgumentException(); + if (tgt.length == 0) + throw new IllegalArgumentException(); + for (Value v : tgt) + if (v == null) + throw new IllegalArgumentException(); + if (N <= 0) + throw new IllegalArgumentException(); + if (gasEngine == null) + throw new IllegalArgumentException(); + if (graphAccessor == null) + throw new IllegalArgumentException(); + + this.src = src; + this.tgt = tgt; + this.N = N; + this.gasEngine = gasEngine; + this.graphAccessor = graphAccessor; + } + + @Override + public FuzzySSSPResult call() throws Exception { + + final FuzzySSSPResult result = new FuzzySSSPResult(); + + /* + * For each source vertex, do an SSSP pass. This labels all reachable + * vertices with their distance from that source vertex. This will also + * tell us whether each of the target vertices was reachable from a + * given source vertex. + * + * Each time we do the SSSP for a source vertex, we collect the set of + * vertices lying along a shortest path from the source vertex to each + * of the target vertices. These collections are combined and will be + * used as the starting point for BFS (below). + */ + + // The set of vertices along a shortest path. + final Set<Value> setAll = new LinkedHashSet<Value>(); + + for (Value src : this.src) { + + final IGASContext<SSSP.VS, SSSP.ES, Integer> gasContext = gasEngine + .newGASContext(graphAccessor, new SSSP()); + + final IGASState<SSSP.VS, SSSP.ES, Integer> gasState = gasContext + .getGASState(); + + // Initialize the frontier. + gasState.setFrontier(gasContext, src); + + // Converge. 
+ gasContext.call(); + + // The set of vertices along a shortest path for this source. + final Set<Value> set = new LinkedHashSet<Value>(); + + /* + * FIXME Extract the vertices on a shortest path. + * + * Note: This requires either maintaining the predecessor map or + * efficiently obtaining it (if this is possible) from the levels. + */ + + // Extract whether each target vertex is reachable + for (Value tgt : this.tgt) { + if (gasState.isVisited(tgt)) { + // That target was visited for this source. + result.addReachable(src, tgt); + } + } + + // Combine with the vertices from the other sources. + setAll.addAll(set); + + } + + /* + * BFS. + * + * We populate the initial frontier with the set of vertices that we + * collected above. + * + * Note: BFS is overridden to halt once we have visited at least N + * vertices. + */ + { + final IGASContext<BFS.VS, BFS.ES, Void> gasContext = gasEngine + .newGASContext(graphAccessor, new BFS() { + @Override + public boolean nextRound(IGASContext<VS, ES, Void> ctx) { + final IStaticFrontier frontier = ctx.getGASState() + .frontier(); + final Iterator<Value> itr = frontier.iterator(); + while (itr.hasNext()) { + final Value v = itr.next(); + if (result.addVisited(v) + && result.getVisitedCount() >= N) { + /* + * We have reached our threshold during the + * BFS expansion. + * + * Note: Since we are expanding in a breadth + * first manner, all vertices discovered + * during a given iteration are at the same + * distance from the initial set of vertices + * collected from the shortest paths. + */ + return false; + } + } + // Inherent the base class behavior. + return super.nextRound(ctx); + } + }); + + final IGASState<BFS.VS, BFS.ES, Void> gasState = gasContext + .getGASState(); + + // Initialize the frontier. + for (Value v : setAll) { + + // add to frontier. + gasState.setFrontier(gasContext, v); + + // Add to initial visited set. + result.addVisited(v); + + } + + // Converge. 
+ gasContext.call(); + + /* + * Note: We extracted the active vertices in each iteration from the + * new frontier, so we are done as soon we the BFS terminates. + */ + + } + + // Return result. + return result; + + } + + public static void main(final String[] args) throws Exception { + + final int nthreads = 4; + + final Properties properties = new Properties(); + + final BigdataGraphFixture graphFixture = new BigdataGraphFixture( + properties); + + final IGASEngine gasEngine = graphFixture.newGASEngine(nthreads); + + try { + + final Value[] src = null; + final Value[] tgt = null; + final int N = 0; + + final IGraphAccessor graphAccessor = graphFixture + .newGraphAccessor(null/* ignored */); + + final FuzzySSSPResult result = new FuzzySSSP(src, tgt, N, + gasEngine, graphAccessor).call(); + + System.out.println(result); + + } finally { + + gasEngine.shutdownNow(); + + } + } + +} Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java 2014-01-31 18:37:34 UTC (rev 7838) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java 2014-02-03 12:57:35 UTC (rev 7839) @@ -44,7 +44,13 @@ * undirected scatter/gather. Add unit test for undirected. * * FIXME New SSSP (push style scatter abstraction with new test case - * based on graph example developed for this) + * based on graph example developed for this). Note: The push style + * scatter on the GPU is implemented by capturing each (src,edge) pair + * as a distint entry in the frontier. This gives us all of the + * necessary variety. We then reduce that variety, applying the binary + * operator to combine the intermediate results. Finally, an APPLY() + * phase is executed to update the state of the distinct vertices in the + * frontier. * * TODO Add a reducer to report the actual minimum length paths. 
This is * similar to a BFS tree, but the path lengths are not integer values so Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASState.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASState.java 2014-01-31 18:37:34 UTC (rev 7838) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASState.java 2014-02-03 12:57:35 UTC (rev 7839) @@ -210,6 +210,13 @@ } @Override + public boolean isVisited(final Value v) { + + return vertexState.get(v) != null; + + } + + @Override public ES getState(final Statement e) { if (edgeState == null) Modified: branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestBFS.java =================================================================== --- branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestBFS.java 2014-01-31 18:37:34 UTC (rev 7838) +++ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestBFS.java 2014-02-03 12:57:35 UTC (rev 7839) @@ -23,8 +23,6 @@ */ package com.bigdata.rdf.graph.impl.bd; -import org.apache.http.util.ExceptionUtils; - import com.bigdata.rdf.graph.IGASContext; import com.bigdata.rdf.graph.IGASEngine; import com.bigdata.rdf.graph.IGASState; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-31 18:37:45
|
Revision: 7838 http://bigdata.svn.sourceforge.net/bigdata/?rev=7838&view=rev Author: thompsonbry Date: 2014-01-31 18:37:34 +0000 (Fri, 31 Jan 2014) Log Message: ----------- Modified the HARestore utility to support the automatic detection of the most recent snapshot, extraction of the journal from that snapshot, and rollforward. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-01-31 17:44:48 UTC (rev 7837) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-01-31 18:37:34 UTC (rev 7838) @@ -26,10 +26,15 @@ import java.io.File; import java.io.FileFilter; import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; import java.util.Formatter; import org.apache.log4j.Logger; +import com.bigdata.ha.halog.IHALogReader; +import com.bigdata.journal.jini.ha.SnapshotManager; + /** * Utility class for operations on files that are named using a commit counter. * @@ -247,4 +252,91 @@ } + /** + * Find and return the {@link File} associated with the greatest commit + * counter. This uses a reverse order search to locate the most recent file + * very efficiently. + * + * @param f + * The root of the directory structure for the snapshot or HALog + * files. 
+ * @param fileFilter + * Either the {@link SnapshotManager#SNAPSHOT_FILTER} or the + * {@link IHALogReader#HALOG_FILTER}. + * + * @return The file from the directory structure associated with the + * greatest commit counter. + * + * @throws IOException + */ + public static File findGreatestCommitCounter(final File f, + final FileFilter fileFilter) throws IOException { + + if (f == null) + throw new IllegalArgumentException(); + + if (fileFilter == null) + throw new IllegalArgumentException(); + + if (f.isDirectory()) { + + final File[] files = f.listFiles(fileFilter); + + /* + * Sort into (reverse) lexical order to force visitation in + * (reverse) lexical order. + * + * Note: This should work under any OS. Files will be either + * directory names (3 digits) or filenames (21 digits plus the file + * extension). Thus the comparison centers numerically on the digits + * that encode either part of a commit counter (subdirectory) or an + * entire commit counter (HALog file). + */ + Arrays.sort(files,ReverseFileComparator.INSTANCE); + + for (int i = 0; i < files.length; i++) { + + final File tmp = findGreatestCommitCounter(files[i], fileFilter); + + if (tmp != null) { + + // Done. + return tmp; + + } + + } + + } else if (fileFilter.accept(f)) { + + // Match + return f; + + } + + // No match. + return null; + + } + + /** + * Impose a reverse sort on files. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private static class ReverseFileComparator implements Comparator<File> { + + @Override + public int compare(final File o1, final File o2) { + + return o2.compareTo(o1); + + } + + /** Impose a reverse sort on files. 
*/ + private static final Comparator<File> INSTANCE = new ReverseFileComparator(); + + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-01-31 17:44:48 UTC (rev 7837) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-01-31 18:37:34 UTC (rev 7838) @@ -27,6 +27,8 @@ package com.bigdata.journal; import java.io.File; +import java.io.FileFilter; +import java.io.IOException; import junit.framework.TestCase2; @@ -63,4 +65,117 @@ } + public void test_findGreatestCommitCounter() throws IOException { + + final String ext = ".tmp"; + + final FileFilter fileFilter = new FileFilter() { + + @Override + public boolean accept(final File f) { + if (f.isDirectory()) { + + return true; + + } + return f.getName().endsWith(ext); + } + + }; + + // temp directory for this test. + final File dir = File.createTempFile(getName(), ""); + try { + + if (!dir.delete()) + fail("Could not delete: " + dir); + if (!dir.mkdirs()) + fail("Could not create: " + dir); + + final File f1 = CommitCounterUtility.getCommitCounterFile(dir, 1L, + ext); + final File f10 = CommitCounterUtility.getCommitCounterFile(dir, + 10L, ext); + final File f100 = CommitCounterUtility.getCommitCounterFile(dir, + 100L, ext); + final File f1000 = CommitCounterUtility.getCommitCounterFile(dir, + 1000L, ext); + final File f10000 = CommitCounterUtility.getCommitCounterFile(dir, + 10000L, ext); + + // No files. Returns null. + assertEquals(null, CommitCounterUtility.findGreatestCommitCounter( + dir, fileFilter)); + + // Create directory structure. + if (!f10.getParentFile().mkdirs()) + fail("Could not create directory structure: " + f1000); + + // No files. Returns null. 
+ assertEquals(null, CommitCounterUtility.findGreatestCommitCounter( + dir, fileFilter)); + + if (!f10.createNewFile()) + fail("Could not create: " + f10); + + // This is the only file. It should be returned. + assertEquals(f10, CommitCounterUtility.findGreatestCommitCounter( + dir, fileFilter)); + + // Create a file with a commit counter LT that file. + if (!f1.createNewFile()) + fail("Could not create: " + f1); + + // The return value should not change. + assertEquals(f10, CommitCounterUtility.findGreatestCommitCounter( + dir, fileFilter)); + + // Create a file with a larger commit counter. + if (!f100.createNewFile()) + fail("Could not create: " + f100); + + // That file should now be returned. + assertEquals(f100, CommitCounterUtility.findGreatestCommitCounter( + dir, fileFilter)); + + // Create a file with a larger commit counter. The commit counter + // will cause another directory to be created. + if (!f1000.getParentFile().mkdirs()) + fail("Could not create directory structure: " + f1000); + if (!f1000.createNewFile()) + fail("Could not create: " + f1000); + + // That file should now be returned. + assertEquals(f1000, CommitCounterUtility.findGreatestCommitCounter( + dir, fileFilter)); + + // Create a new directory structure, but do not add a file. The new + // directory structure is ordered GT the existing files. For this + // case the algorithm needs to work backwards to see if it can find + // a non-empty directory. + if (!f10000.getParentFile().mkdirs()) + fail("Could not create directory structure: " + f10000); + + // The same file should be returned since the new dir is empty. + assertEquals(f1000, CommitCounterUtility.findGreatestCommitCounter( + dir, fileFilter)); + + // Add a file to that directory. + if (!f10000.createNewFile()) + fail("Could not create: " + f10000); + + // That file should be returned. 
+ assertEquals(f10000, + CommitCounterUtility.findGreatestCommitCounter(dir, + fileFilter)); + + } finally { + + CommitCounterUtility.recursiveDelete(false/* errorIfDeleteFails */, + dir, fileFilter); + + } + + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-01-31 17:44:48 UTC (rev 7837) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-01-31 18:37:34 UTC (rev 7838) @@ -40,6 +40,7 @@ import com.bigdata.io.DirectBufferPool; import com.bigdata.io.IBufferAccess; import com.bigdata.io.writecache.WriteCache; +import com.bigdata.journal.CommitCounterUtility; import com.bigdata.journal.IHABufferStrategy; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.Journal; @@ -58,9 +59,21 @@ */ private static final Logger haLog = Logger.getLogger("com.bigdata.haLog"); + /** The journal to be rolled forward. */ private final Journal journal; + /** + * The directory containing the HALog files to be applied to that journal. + */ private final File haLogDir; + /** + * + * @param journal + * The journal to be rolled forward. + * @param haLogDir + * The directory containing the HALog files to be applied to that + * journal. + */ public HARestore(final Journal journal, final File haLogDir) { if (journal == null) @@ -349,43 +362,54 @@ } /** - * Apply HALog file(s) to the journal. Each HALog file represents a single - * native transaction on the database and will advance the journal by one - * commit point. The journal will go through a local commit protocol as each - * HALog is applied. HALogs will be applied starting with the first commit - * point GT the current commit point on the journal. 
You may optionally - * specify a stopping criteria, e.g., the last commit point that you wish to - * restore. If no stopping criteria is specified, then all HALog files in - * the specified directory will be applied and the journal will be rolled - * forward to the most recent transaction. The HALog files are not removed, - * making this process safe. + * Apply HALog file(s) to a journal or snapshot file. If the file specified + * is a snapshot, then it is uncompressed into the current working directory + * to obtain a journal file and the HALogs are applied to that journal. If + * the file specified is a journal, then the HALog files are simply rolled + * forward against that journal. If the file is a directory, it is assumed + * to be the snapshot directory. In this case, the most recent snapshot file + * is located, decompressed to obtain a journal file, and then rolled + * forward by applying any more recent HALog files. + * <p> + * Each HALog file represents a single native transaction on the database + * and will advance the journal by one commit point. The journal will go + * through a local commit protocol as each HALog is applied. HALogs will be + * applied starting with the first commit point GT the current commit point + * on the journal. You may optionally specify a stopping criteria, e.g., the + * last commit point that you wish to restore. If no stopping criteria is + * specified, then all HALog files in the specified directory will be + * applied and the journal will be rolled forward to the most recent + * transaction. The HALog files are not removed, making this process safe. 
*
* @param args
- * <code>[options] journalFile haLogDir</code><br>
+ * <code>[options] journalOrSnapshotFileOrSnapshotDir haLogDir</code>
+ * <br>
* where <code>journalFile</code> is the name of the journal file<br>
* where <code>haLogDir</code> is the name of a directory
* containing zero or more HALog files<br>
* where <code>options</code> are any of:
* <dl>
- * <dt>-l</dt>
- * <dd>List available commit points, but do not apply them. This
- * option provides information about the current commit point on
- * the journal and the commit points available in the HALog
- * files.</dd>
- * <dt>-h commitCounter</dt>
- * <dd>The last commit counter that will be applied (halting
- * point for restore).</dd>
+ * <dt>-l</dt> <dd>List available commit points, but do not apply
+ * them. This option provides information about the current
+ * commit point on the journal and the commit points available in
+ * the HALog files.</dd> <dt>-h commitCounter</dt> <dd>The last
+ * commit counter that will be applied (halting point for
+ * restore).</dd>
* </dl>
*
* @return <code>0</code> iff the operation was fully successful.
- * @throws IOException
*
- * @throws Exception
+ * @throws IOException
+ * if an error occurs when reading an HALog or writing on the
+ * journal.
+ * @throws NoSnapshotException
+ * if you specify a snapshot directory to be searched, but no
+ * snapshot files are found. This can happen if you specify the
+ * wrong directory. It can also happen if you are using the
+ * {@link NoSnapshotPolicy} and never took a snapshot!
+ * @throws RuntimeException
* if the {@link UUID}s or other critical metadata of the
* journal and the HALogs differ.
- * @throws Exception
- * if an error occcur when reading an HALog or writing on the
- * journal.
*/
public static void main(final String[] args) throws IOException {
@@ -446,13 +470,47 @@
// HALogDir. 
final File haLogDir = new File(args[i++]); - /* - * Decompress the snapshot onto a temporary file in the current working - * directory. - */ + if(journalFile.isDirectory()) { + /* + * File is a directory. + * + * Locate the most recent snapshot in that directory structure. + */ + + File tmp = CommitCounterUtility.findGreatestCommitCounter( + journalFile, SnapshotManager.SNAPSHOT_FILTER); + + if (tmp == null) { + + /* + * There are no snapshot files. + * + * Note: This can happen if you specify the wrong directory. It + * can also happen if you are using the NoSnapshotPolicy and + * never took a snapshot! + */ + + throw new NoSnapshotException("No snapshot file(s): " + + journalFile); + + } + + System.out.println("Most recent snapshot: " + tmp); + + journalFile = tmp; + + } + if (journalFile.getName().endsWith(SnapshotManager.SNAPSHOT_EXT)) { + /* + * File is a snapshot. + * + * Decompress the snapshot onto a temporary file in the current + * working directory. + */ + // source is the snapshot. final File in = journalFile; @@ -541,6 +599,12 @@ } + private static void usage(final String[] args) { + + System.err.println("usage: (-l|-h commitPoint) <journalFile> haLogDir"); + + } + /** * Verify that the HALog root block is consistent with the Journal's root * block. @@ -578,10 +642,4 @@ } - private static void usage(final String[] args) { - - System.err.println("usage: (-l|-h commitPoint) <journalFile> haLogDir"); - - } - } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java 2014-01-31 18:37:34 UTC (rev 7838) @@ -0,0 +1,55 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. 
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.journal.jini.ha;
+
+import java.io.IOException;
+
+/**
+ * An instance of this exception is thrown if the {@link HARestore} class is
+ * unable to locate a snapshot file. This can happen if you specify the wrong
+ * directory. It can also happen if you are using the {@link NoSnapshotPolicy}
+ * and never took a snapshot!
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class NoSnapshotException extends IOException {
+
+ private static final long serialVersionUID = 1L;
+
+ public NoSnapshotException() {
+ super();
+ }
+
+ public NoSnapshotException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public NoSnapshotException(String message) {
+ super(message);
+ }
+
+ public NoSnapshotException(Throwable cause) {
+ super(cause);
+ }
+}
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-31 17:45:06
|
Revision: 7837 http://bigdata.svn.sourceforge.net/bigdata/?rev=7837&view=rev Author: thompsonbry Date: 2014-01-31 17:44:48 +0000 (Fri, 31 Jan 2014) Log Message: ----------- Merge from main development branch to RDR branch prior to bringing code back to the main branch. {{{ Merge complete. ===== File Statistics: ===== Deleted: 13 Added: 105 Updated: 273 ==== Property Statistics: ===== Updated: 50 ==== Conflict Statistics: ===== File conflicts: 2 }}} The conflicts are: {{{ C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestStatementBuffer.java }}} In both cases, we accepted the RDR branch version of the file. Note: With this merge, the SIDS mode is no longer available! Instead, we support efficient per-statement metadata in both the triples and quads mode of the database. This also has an impact on query. Instead of the SIDs mode query pattern (where the graph variable is bound to the statement), you need to use the RDR syntax for either explicit or implicit binding of the statement on a variable. This commit also provides support for the terse RDR syntax when parsing ntriples. Reified statement models are also automatically turned into efficient inline representations, but the use of the RDF reification syntax does impose an overhead since the blank nodes associated with the statement model will persist throughput the parse of the document. This is not efficient for large documents. 
See #526 (Reification done right) Modified Paths: -------------- branches/RDR/.classpath branches/RDR/README branches/RDR/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/BOpBase.java branches/RDR/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/RDR/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java branches/RDR/bigdata/src/java/com/bigdata/bop/BOpUtility.java branches/RDR/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/IdFactory.java branches/RDR/bigdata/src/java/com/bigdata/bop/PipelineOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/ap/SampleIndex.java branches/RDR/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/constraint/Constraint.java branches/RDR/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java branches/RDR/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java branches/RDR/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java branches/RDR/bigdata/src/java/com/bigdata/bop/engine/BSBundle.java branches/RDR/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java branches/RDR/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java branches/RDR/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/RDR/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java branches/RDR/bigdata/src/java/com/bigdata/bop/engine/RunState.java branches/RDR/bigdata/src/java/com/bigdata/bop/fed/EmptyChunkMessage.java branches/RDR/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java branches/RDR/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/BaseJoinStats.java 
branches/RDR/bigdata/src/java/com/bigdata/bop/join/HTreeHashIndexOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/HashIndexOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/HashJoinOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/IHashJoinUtility.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/JVMHashIndex.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/JVMHashIndexOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinUtility.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/JoinVariableNotBoundException.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/NamedSolutionSetStats.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/PipelineJoinStats.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/SolutionSetHashJoinOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/NoSolutionsException.java branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/rto/EdgeSample.java branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Path.java branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/rto/PathIds.java branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/rto/SampleBase.java branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Vertex.java branches/RDR/bigdata/src/java/com/bigdata/bop/paths/ZeroLengthPathOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/solutions/DropOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/solutions/GroupByOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/solutions/GroupByRewriter.java branches/RDR/bigdata/src/java/com/bigdata/bop/solutions/GroupByState.java 
branches/RDR/bigdata/src/java/com/bigdata/bop/solutions/MemoryGroupByOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/solutions/ProjectionOp.java branches/RDR/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java branches/RDR/bigdata/src/java/com/bigdata/btree/IndexMetadata.java branches/RDR/bigdata/src/java/com/bigdata/btree/data/DefaultLeafCoder.java branches/RDR/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/AbstractKeyBuffer.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/AbstractRaba.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/ConditionalRabaCoder.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/EmptyRaba.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/IRaba.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/MutableKeysRaba.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/MutableValueBuffer.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/codec/AbstractCodedRaba.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/codec/CanonicalHuffmanRabaCoder.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/codec/EmptyRabaValueCoder.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/codec/FixedLengthValueRabaCoder.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoder.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/codec/IRabaCoder.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/codec/SimpleRabaCoder.java branches/RDR/bigdata/src/java/com/bigdata/concurrent/FutureTaskInvariantMon.java branches/RDR/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java branches/RDR/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java branches/RDR/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java 
branches/RDR/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/RDR/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java branches/RDR/bigdata/src/java/com/bigdata/ha/msg/HAWriteMessage.java branches/RDR/bigdata/src/java/com/bigdata/ha/msg/HAWriteMessageBase.java branches/RDR/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java branches/RDR/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java branches/RDR/bigdata/src/java/com/bigdata/ha/pipeline/PipelineDownstreamChange.java branches/RDR/bigdata/src/java/com/bigdata/ha/pipeline/PipelineUpstreamChange.java branches/RDR/bigdata/src/java/com/bigdata/htree/DirectoryPage.java branches/RDR/bigdata/src/java/com/bigdata/htree/NodeSerializer.java branches/RDR/bigdata/src/java/com/bigdata/htree/raba/MutableKeyBuffer.java branches/RDR/bigdata/src/java/com/bigdata/htree/raba/MutableValueBuffer.java branches/RDR/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/RDR/bigdata/src/java/com/bigdata/journal/Name2Addr.java branches/RDR/bigdata/src/java/com/bigdata/journal/Options.java branches/RDR/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/RDR/bigdata/src/java/com/bigdata/quorum/QuorumActor.java branches/RDR/bigdata/src/java/com/bigdata/quorum/QuorumClient.java branches/RDR/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java branches/RDR/bigdata/src/java/com/bigdata/relation/accesspath/ElementFilter.java branches/RDR/bigdata/src/java/com/bigdata/relation/accesspath/IBindingSetAccessPath.java branches/RDR/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/ChunkedArrayIterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/ChunkedArraysIterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/ChunkedConvertingIterator.java 
branches/RDR/bigdata/src/java/com/bigdata/striterator/ChunkedOrderedStriterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/ChunkedResolvingIterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/Chunkerator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/CloseableIteratorWrapper.java branches/RDR/bigdata/src/java/com/bigdata/striterator/Dechunkerator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/DelegateChunkedIterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/GenericChunkedStriterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/IChunkedIterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/IChunkedStriterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/MergeFilter.java branches/RDR/bigdata/src/java/com/bigdata/striterator/PushbackIterator.java branches/RDR/bigdata/src/java/com/bigdata/striterator/Resolver.java branches/RDR/bigdata/src/java/com/bigdata/striterator/Striterator.java branches/RDR/bigdata/src/java/com/bigdata/util/NT.java branches/RDR/bigdata/src/releases/RELEASE_1_3_0.txt branches/RDR/bigdata/src/test/com/bigdata/TestAll.java branches/RDR/bigdata/src/test/com/bigdata/bop/TestAll.java branches/RDR/bigdata/src/test/com/bigdata/bop/controller/TestSubqueryOp.java branches/RDR/bigdata/src/test/com/bigdata/bop/controller/TestUnion.java branches/RDR/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/RDR/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_Slice.java branches/RDR/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_SortOp.java branches/RDR/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java branches/RDR/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java branches/RDR/bigdata/src/test/com/bigdata/bop/join/AbstractHashJoinUtilityTestCase.java 
branches/RDR/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java branches/RDR/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestAll.java branches/RDR/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJGraph.java branches/RDR/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJoinGraph.java branches/RDR/bigdata/src/test/com/bigdata/bop/solutions/TestMemorySortOp.java branches/RDR/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java branches/RDR/bigdata/src/test/com/bigdata/btree/raba/codec/MutableRabaCoder.java branches/RDR/bigdata/src/test/com/bigdata/ha/msg/TestAll.java branches/RDR/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive.java branches/RDR/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java branches/RDR/bigdata/src/test/com/bigdata/htree/AbstractHTreeTestCase.java branches/RDR/bigdata/src/test/com/bigdata/htree/TestAll_HTree.java branches/RDR/bigdata/src/test/com/bigdata/htree/TestHTreeWithMemStore.java branches/RDR/bigdata/src/test/com/bigdata/htree/TestIncrementalWrite.java branches/RDR/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/RDR/bigdata/src/test/com/bigdata/journal/TestWORMStrategyNoCache.java branches/RDR/bigdata/src/test/com/bigdata/journal/ha/HABranch.txt branches/RDR/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java branches/RDR/bigdata/src/test/com/bigdata/striterator/TestAll.java branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 
branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3RestorePolicy.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java branches/RDR/bigdata-jini/src/test/com/bigdata/quorum/zk/MockQuorumMember.java branches/RDR/bigdata-perf/CI/govtrack/build.properties branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/ChunkedMaterializationOp.java branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/DataSetJoin.java branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/update/ParserStats.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/IV.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/EBVBOp.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IPassesMaterialization.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LangBOp.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LangMatchesBOp.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SPARQLConstraint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TryBeforeMaterializationConstraint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/extensions/DateTimeExtension.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ASTBase.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AssignmentNode.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ComputedMaterializationRequirement.java 
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ConstructNode.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/GraphPatternGroup.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/GroupMemberValueExpressionNodeBase.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/GroupNodeBase.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/NamedSubqueryInclude.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/NamedSubqueryRoot.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OrderByNode.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryBase.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryOptimizerEnum.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryRoot.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/SliceNode.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/SubqueryRoot.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ValueExpressionListBaseNode.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ZeroLengthPathNode.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpBase.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTConstructIterator.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchInSearchOptimizer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchOptimizer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/DataSetSummary.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/AbstractChunkSizeHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/AbstractQueryHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/AnalyticQueryHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/AtOnceHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/BufferChunkCapacityHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/BufferChunkOfChunksCapacityHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/ChunkSizeHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/IQueryHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/OptimizerQueryHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/PipelineMaxMessagesPerTaskHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/PipelineMaxParallelHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/PipelineQueueCapacityHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/QueryHintRegistry.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RunFirstHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RunLastHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RunOnceHint.java 
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBottomUpOptimizer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTConstructOptimizer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTJoinOrderByTypeOptimizer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTOptimizerList.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTQueryHintOptimizer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTRangeCountOptimizer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTSparql11SubqueryOptimizer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCall.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/spo/FastRDFValueCoder2.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/spo/InGraphBinarySearchFilter.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/spo/InGraphHashSetFilter.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOFilter.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java branches/RDR/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/GenerateBarData.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataAndSPARQLTestCase.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataDrivenSPARQLTestCase.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestQueryHints.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestSubQuery.java 
branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/query-hints-01.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/query-hints-06.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/search-prefix-match.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestSearch.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTSparql11SubqueryOptimizer.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/GroupGraphPatternBuilder.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestGroupGraphPatternBuilder.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestSubqueryPatterns.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java branches/RDR/bigdata-war/src/html/index.html branches/RDR/build.properties branches/RDR/build.xml branches/RDR/ctc-striterators/src/java/cutthecrap/utils/striterators/FilterBase.java branches/RDR/lgpl-utils/build.properties branches/RDR/lgpl-utils/build.xml branches/RDR/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/CustomByteArrayFrontCodedList.java 
branches/RDR/pom.xml Added Paths: ----------- branches/RDR/bigdata/lib/lgpl-utils-1.0.7-270114.jar branches/RDR/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java branches/RDR/bigdata/src/java/com/bigdata/bop/controller/INamedSubqueryOp.java branches/RDR/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoderDupKeys.java branches/RDR/bigdata/src/java/com/bigdata/ha/AbstractMessageTask.java branches/RDR/bigdata/src/java/com/bigdata/ha/HAPipelineResetRequest.java branches/RDR/bigdata/src/java/com/bigdata/ha/HAPipelineResetResponse.java branches/RDR/bigdata/src/java/com/bigdata/ha/IHAPipelineResetRequest.java branches/RDR/bigdata/src/java/com/bigdata/ha/IHAPipelineResetResponse.java branches/RDR/bigdata/src/java/com/bigdata/ha/msg/HAMessageWrapper.java branches/RDR/bigdata/src/java/com/bigdata/ha/msg/HASendState.java branches/RDR/bigdata/src/java/com/bigdata/ha/msg/IHAMessageWrapper.java branches/RDR/bigdata/src/java/com/bigdata/ha/msg/IHASendState.java branches/RDR/bigdata/src/java/com/bigdata/ha/pipeline/AbstractPipelineChangeException.java branches/RDR/bigdata/src/java/com/bigdata/ha/pipeline/AbstractPipelineException.java branches/RDR/bigdata/src/java/com/bigdata/ha/pipeline/ImmediateDownstreamReplicationException.java branches/RDR/bigdata/src/java/com/bigdata/ha/pipeline/NestedPipelineException.java branches/RDR/bigdata/src/java/com/bigdata/ha/pipeline/PipelineImmediateDownstreamReplicationException.java branches/RDR/bigdata/src/java/com/bigdata/quorum/ServiceLookup.java branches/RDR/bigdata/src/java/com/bigdata/striterator/CloseableChunkedIteratorWrapperConverter.java branches/RDR/bigdata/src/resources/deployment/ branches/RDR/bigdata/src/resources/deployment/vagrant/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/Berksfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/CHANGELOG.md 
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/Gemfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/README.md branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/Thorfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/Vagrantfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/attributes/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/attributes/default.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/chefignore branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/test/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/test/default_test.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/metadata.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/recipes/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/recipes/default.rb branches/RDR/bigdata/src/test/com/bigdata/ha/msg/TestHASendState.java branches/RDR/bigdata/src/test/com/bigdata/ha/pipeline/AbstractHASendAndReceiveTestCase.java branches/RDR/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java branches/RDR/bigdata/src/test/com/bigdata/htree/TestDuplicates.java branches/RDR/bigdata/src/test/com/bigdata/striterator/TestCloseableChunkedIteratorWrapperConverter.java branches/RDR/bigdata-jini/src/resources/README-JINI branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataQuadWrapper.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/OutOfOrderEvaluationException.java 
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOLimitQueryHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTONEdgesQueryHint.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOSampleTypeQueryHint.java branches/RDR/bigdata-rdf/src/resources/data/lehigh/LUBM-U1.rdf.gz branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/AbstractRTOTestCase.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1-noSolutions.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7b.rq 
branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7b.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestAll.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_BAR.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_BSBM.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_FOAF.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_LUBM.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/search-prefix-match2.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/sparql11-subselect-filter-01.nt branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/sparql11-subselect-filter-01.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/sparql11-subselect-filter-01.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/sparql11-subselect-filter-01b.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/test_ticket_801_complex_optionals.nt branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/test_ticket_801_complex_optionals.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/test_ticket_801a_complex_optionals.rq 
branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/test_ticket_801b_complex_optionals.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket-806.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket-806.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket-806.trig branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTMassagedServiceNodeOptimizer.java branches/RDR/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractNamedGraphUpdateTest.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/HashDistinctNamedGraphUpdateTest.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/NativeDistinctNamedGraphUpdateTest.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestService794.java Removed Paths: ------------- branches/RDR/bigdata/lib/lgpl-utils-1.0.6-020610.jar branches/RDR/bigdata/src/resources/deployment/vagrant/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/Berksfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/CHANGELOG.md branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/Gemfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/README.md branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/Thorfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/Vagrantfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/attributes/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/attributes/default.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/chefignore branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/ 
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/test/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/test/default_test.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/metadata.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/recipes/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-bigdata/recipes/default.rb branches/RDR/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java branches/RDR/bigdata/src/test/com/bigdata/journal/ha/TestAll.java branches/RDR/bigdata/src/test/com/bigdata/journal/ha/TestHAWORMStrategy.java branches/RDR/bigdata/src/test/com/bigdata/journal/ha/TestHAWritePipeline.java branches/RDR/bigdata/src/test/com/bigdata/journal/ha/TestJournalHA.java branches/RDR/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/AbstractJoinGraphTestCase.java branches/RDR/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/TestAll.java branches/RDR/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/TestJoinGraphOnBSBMData.java branches/RDR/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/TestJoinGraphOnBarData.java branches/RDR/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/TestJoinGraphOnLubm.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/AbstractRTOTestCase.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1-noSolutions.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.rq 
branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7b.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7b.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestAll.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_BAR.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_BSBM.java 
branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_FOAF.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_LUBM.java branches/RDR/overview.html Property Changed: ---------------- branches/RDR/ branches/RDR/bigdata/lib/jetty/ branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate/ branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/RDR/bigdata/src/java/com/bigdata/bop/util/ branches/RDR/bigdata/src/java/com/bigdata/htree/raba/ branches/RDR/bigdata/src/java/com/bigdata/jsr166/ branches/RDR/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/RDR/bigdata/src/test/com/bigdata/bop/util/ branches/RDR/bigdata/src/test/com/bigdata/jsr166/ branches/RDR/bigdata/src/test/com/bigdata/util/httpd/ branches/RDR/bigdata-compatibility/ branches/RDR/bigdata-jini/src/java/com/bigdata/attr/ branches/RDR/bigdata-jini/src/java/com/bigdata/disco/ branches/RDR/bigdata-jini/src/java/com/bigdata/util/config/ branches/RDR/bigdata-perf/ branches/RDR/bigdata-perf/btc/ branches/RDR/bigdata-perf/btc/src/resources/ branches/RDR/bigdata-perf/lubm/ branches/RDR/bigdata-perf/uniprot/ branches/RDR/bigdata-perf/uniprot/src/ branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/RDR/bigdata-rdf/src/samples/ branches/RDR/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ 
branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/RDR/dsi-utils/ branches/RDR/dsi-utils/LEGAL/ branches/RDR/dsi-utils/lib/ branches/RDR/dsi-utils/src/ branches/RDR/dsi-utils/src/java/ branches/RDR/dsi-utils/src/java/it/ branches/RDR/dsi-utils/src/java/it/unimi/ branches/RDR/dsi-utils/src/test/ branches/RDR/dsi-utils/src/test/it/unimi/ branches/RDR/dsi-utils/src/test/it/unimi/dsi/ branches/RDR/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/RDR/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/RDR/osgi/ branches/RDR/src/resources/bin/config/ Property changes on: branches/RDR ___________________________________________________________________ Modified: svn:ignore - ant-build src bin bigdata*.jar ant-release standalone test* countersfinal.xml events.jnl .settings *.jnl TestInsertRate.out SYSTAP-BBT-result.txt U10load+query *.hprof com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv commit-log.txt eventLog dist bigdata-test com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv DIST.bigdata-*.tgz REL.bigdata-*.tgz queryLog* queryRunState* sparql.txt benchmark CI + ant-build src bin bigdata*.jar ant-release standalone test* countersfinal.xml events.jnl .settings *.jnl TestInsertRate.out SYSTAP-BBT-result.txt U10load+query *.hprof com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv commit-log.txt eventLog dist bigdata-test com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv DIST.bigdata-*.tgz REL.bigdata-*.tgz queryLog* queryRunState* sparql.txt benchmark CI bsbm10-dataset.nt.gz bsbm10-dataset.nt.zip Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 
/branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE:7215-7271 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/ZK_DISCONNECT_HANDLING:7465-7484 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 /branches/BIGDATA_RELEASE_1_3_0:7665-7836 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/MGC_1_3_0:7609-7752 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE:7215-7271 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/ZK_DISCONNECT_HANDLING:7465-7484 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 Modified: branches/RDR/.classpath =================================================================== --- branches/RDR/.classpath 2014-01-31 15:18:44 UTC (rev 7836) +++ branches/RDR/.classpath 2014-01-31 17:44:48 UTC (rev 7837) @@ -32,7 +32,7 @@ <classpathentry kind="src" path="bigdata-gas/src/java"/> <classpathentry kind="src" path="bigdata-gas/src/test"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/dsi-utils-1.0.6-020610.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/lgpl-utils-1.0.6-020610.jar"/> + <classpathentry kind="lib" path="bigdata/lib/lgpl-utils-1.0.7-270114.jar"/> <classpathentry exported="true" kind="lib" 
path="bigdata-jini/lib/apache/zookeeper-3.3.3.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-continuation-7.2.2.v20101205.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-http-7.2.2.v20101205.jar"/> Modified: branches/RDR/README =================================================================== --- branches/RDR/README 2014-01-31 15:18:44 UTC (rev 7836) +++ branches/RDR/README 2014-01-31 17:44:48 UTC (rev 7837) @@ -0,0 +1,4 @@ +Please see the release notes in bigdata/src/releases for getting started +links. This will point you to the installation instructions for the +different deployment modes, the online documentation, the wiki, etc. It +will also point you to resources for support, subscriptions, and licensing. Property changes on: branches/RDR/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/lib/jetty:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/lib/jetty:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:7665-7836 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/MGC_1_3_0/bigdata/lib/jetty:7609-7752 
/branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/lib/jetty:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/lib/jetty:7465-7484 Deleted: branches/RDR/bigdata/lib/lgpl-utils-1.0.6-020610.jar =================================================================== (Binary files differ) Copied: branches/RDR/bigdata/lib/lgpl-utils-1.0.7-270114.jar (from rev 7836, branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/lgpl-utils-1.0.7-270114.jar) =================================================================== (Binary files differ) Modified: branches/RDR/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java 2014-01-31 15:18:44 UTC (rev 7836) +++ branches/RDR/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java 2014-01-31 17:44:48 UTC (rev 7837) @@ -76,26 +76,26 @@ super(op); } - /** - * @see BufferAnnotations#CHUNK_CAPACITY - */ - protected int getChunkCapacity() { - - return getProperty(Annotations.CHUNK_CAPACITY, - Annotations.DEFAULT_CHUNK_CAPACITY); +// /** +// * @see BufferAnnotations#CHUNK_CAPACITY +// */ +// protected int getChunkCapacity() { +// +// return getProperty(Annotations.CHUNK_CAPACITY, +// Annotations.DEFAULT_CHUNK_CAPACITY); +// +// } +// +// /** +// * @see BufferAnnotations#CHUNK_OF_CHUNKS_CAPACITY +// */ +// protected int getChunkOfChunksCapacity() { +// +// return getProperty(Annotations.CHUNK_OF_CHUNKS_CAPACITY, +// Annotations.DEFAULT_CHUNK_OF_CHUNKS_CAPACITY); +// +// } - } - - /** - * @see BufferAnnotations#CHUNK_OF_CHUNKS_CAPACITY - */ - protected int getChunkOfChunksCapacity() { - - return 
getProperty(Annotations.CHUNK_OF_CHUNKS_CAPACITY, - Annotations.DEFAULT_CHUNK_OF_CHUNKS_CAPACITY); - - } - // protected int getFullyBufferedReadThreshold() { // // return getProperty(Annotations.FULLY_BUFFERED_READ_THRESHOLD, @@ -103,14 +103,14 @@ // // } - /** - * @see BufferAnnotations#CHUNK_TIMEOUT - */ - protected long getChunkTimeout() { - - return getProperty(Annotations.CHUNK_TIMEOUT, - Annotations.DEFAULT_CHUNK_TIMEOUT); - - } +// /** +// * @see BufferAnnotations#CHUNK_TIMEOUT +// */ +// protected long getChunkTimeout() { +// +// return getProperty(Annotations.CHUNK_TIMEOUT, +// Annotations.DEFAULT_CHUNK_TIMEOUT); +// +// } } Modified: branches/RDR/bigdata/src/java/com/bigdata/bop/BOpBase.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/bop/BOpBase.java 2014-01-31 15:18:44 UTC (rev 7836) +++ branches/RDR/bigdata/src/java/com/bigdata/bop/BOpBase.java 2014-01-31 17:44:48 UTC (rev 7837) @@ -175,6 +175,7 @@ } + @Override final public Map<String, Object> annotations() { return Collections.unmodifiableMap(annotations); @@ -234,6 +235,7 @@ } + @Override public BOp get(final int index) { return args[index]; @@ -286,6 +288,7 @@ } + @Override public int arity() { return args.length; @@ -297,6 +300,7 @@ * <p> * Note: This is much less efficient than {@link #argIterator()}. */ + @Override final public List<BOp> args() { return Collections.unmodifiableList(Arrays.asList(args)); @@ -309,6 +313,7 @@ * The iterator does not support removal. (This is more efficient than * #args()). 
*/ + @Override final public Iterator<BOp> argIterator() { return new ArgIterator(); @@ -339,6 +344,7 @@ } // shallow copy + @Override public BOp[] toArray() { final BOp[] a = new BOp[args.length]; @@ -475,6 +481,7 @@ // // } + @Override public Object getProperty(final String name) { return annotations.get(name); @@ -543,6 +550,7 @@ } + @Override public BOpBase setProperty(final String name, final Object value) { final BOpBase tmp = (BOpBase) this.clone(); Modified: branches/RDR/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/bop/BOpContext.java 2014-01-31 15:18:44 UTC (rev 7836) +++ branches/RDR/bigdata/src/java/com/bigdata/bop/BOpContext.java 2014-01-31 17:44:48 UTC (rev 7837) @@ -59,8 +59,9 @@ import com.bigdata.rwstore.sector.IMemoryManager; import com.bigdata.striterator.ChunkedFilter; import com.bigdata.striterator.Chunkerator; -import com.bigdata.striterator.CloseableIteratorWrapper; +import com.bigdata.striterator.CloseableChunkedIteratorWrapperConverter; import com.bigdata.striterator.IChunkedIterator; +import com.bigdata.striterator.IChunkedStriterator; import cutthecrap.utils.striterators.ICloseableIterator; @@ -1078,8 +1079,8 @@ } /** - * Convert an {@link IAccessPath#iterator()} into a stream of - * {@link IBindingSet}s. + * Convert an {@link IAccessPath#iterator()} into a stream of chunks of + * {@link IBindingSet}. * * @param src * The iterator draining the {@link IAccessPath}. This will visit @@ -1090,7 +1091,7 @@ * Statistics to be updated as elements and chunks are consumed * (optional). * - * @return The dechunked iterator visiting the solutions. The order of the + * @return An iterator visiting chunks of solutions. The order of the * original {@link IElement}s is preserved. 
* * @see https://sourceforge.net/apps/trac/bigdata/ticket/209 (AccessPath @@ -1105,14 +1106,15 @@ // * The array of distinct variables (no duplicates) to be // * extracted from the visited {@link IElement}s. @SuppressWarnings({ "rawtypes", "unchecked" }) - static public ICloseableIterator<IBindingSet> solutions( + static public ICloseableIterator<IBindingSet[]> solutions( final IChunkedIterator<?> src, // final IPredicate<?> pred,// // final IVariable<?>[] varsx, final BaseJoinStats stats// ) { - return new CloseableIteratorWrapper( + //return new CloseableIteratorWrapper( + final IChunkedStriterator itr1 = new com.bigdata.striterator.ChunkedStriterator(src).addFilter( // new ChunkedFilter() { new ChunkedFilter<IChunkedIterator<Object>, Object, Object>() { @@ -1160,18 +1162,28 @@ } - })) { + }); + //) { +// +// /** +// * Close the real source if the caller closes the returned iterator. +// */ +// @Override +// public void close() { +// super.close(); +// src.close(); +// } +// }; - /** - * Close the real source if the caller closes the returned iterator. - */ - @Override - public void close() { - super.close(); - src.close(); - } - }; + /* + * Convert from IChunkedIterator<IBindingSet> to + * ICloseableIterator<IBindingSet[]>. This is a fly weight conversion. + */ + final ICloseableIterator<IBindingSet[]> itr2 = new CloseableChunkedIteratorWrapperConverter<IBindingSet>( + itr1); + return itr2; + } /* Modified: branches/RDR/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java 2014-01-31 15:18:44 UTC (rev 7836) +++ branches/RDR/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java 2014-01-31 17:44:48 UTC (rev 7837) @@ -1,5 +1,29 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... 
+ +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ package com.bigdata.bop; +import java.util.Iterator; import java.util.LinkedHashSet; /** @@ -7,23 +31,105 @@ */ public class BOpIdFactory implements IdFactory { - private final LinkedHashSet<Integer> ids = new LinkedHashSet<Integer>(); - - private int nextId = 0; - - public void reserve(int id) { - ids.add(id); - } + /** The set of reserved bop identifiers. */ + private LinkedHashSet<Integer> ids; - public int nextId() { + private int nextId = 0; - while (ids.contains(nextId)) { + /** + * Reserve a bop id by adding it to a set of known identifiers that will not + * be issued by {@link #nextId()}. + * + * @param id + * The identifier. + */ + public void reserve(final int id) { + + synchronized (this) { + + if (ids == null) { - nextId++; - - } + // Lazily allocated. + ids = new LinkedHashSet<Integer>(); - return nextId++; - } - + ids.add(id); + + } + + } + + } + + @Override + public int nextId() { + + synchronized (this) { + + if (ids != null) { + + while (ids.contains(nextId)) { + + nextId++; + + } + + } + + return nextId++; + + } + + } + + /** + * Reserve ids used by the predicates in some join graph. + * + * @param preds + * The vertices of the join graph. 
+ */ + public void reserveIds(final IPredicate<?>[] preds) { + + if (preds == null) + throw new IllegalArgumentException(); + + for (IPredicate<?> p : preds) { + + reserve(p.getId()); + + } + + } + + /** + * Reserve ids used by the constraints for some predicate or join graph. + * + * @param constraints + * The constraints that attach to some predicate (optional). + */ + public void reserveIds(final IConstraint[] constraints) { + + if (constraints == null) + return; + + for (IConstraint c : constraints) { + + final Iterator<BOp> itr = BOpUtility + .preOrderIteratorWithAnnotations(c); + + while (itr.hasNext()) { + + final BOp y = itr.next(); + + final Integer anId = (Integer) y + .getProperty(BOp.Annotations.BOP_ID); + + if (anId != null) + reserve(anId.intValue()); + + } + + } + + } + } \ No newline at end of file Modified: branches/RDR/bigdata/src/java/com/bigdata/bop/BOpUtility.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2014-01-31 15:18:44 UTC (rev 7836) +++ branches/RDR/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2014-01-31 17:44:48 UTC (rev 7837) @@ -35,6 +35,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; import java.util.Set; import org.apache.log4j.Logger; @@ -53,6 +54,7 @@ import cutthecrap.utils.striterators.EmptyIterator; import cutthecrap.utils.striterators.Expander; import cutthecrap.utils.striterators.Filter; +import cutthecrap.utils.striterators.ICloseable; import cutthecrap.utils.striterators.ICloseableIterator; import cutthecrap.utils.striterators.SingleValueIterator; import cutthecrap.utils.striterators.Striterator; @@ -72,7 +74,7 @@ * Pre-order recursive visitation of the operator tree (arguments only, no * annotations). 
*/ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public static Iterator<BOp> preOrderIterator(final BOp op) { return new Striterator(new SingleValueIterator(op)) @@ -466,6 +468,8 @@ * The type of the node to be extracted. * * @return A list containing those references. + * + * @see #visitAll(BOp, Class) */ public static <C> List<C> toList(final BOp op, final Class<C> clas) { @@ -483,6 +487,44 @@ } + /** + * Return the sole instance of the specified class. + * + * @param op + * The root of the traversal. + * @param class1 + * The class to look for. + * @return The sole instance of that class. + * @throws NoSuchElementException + * if there is no such instance. + * @throws RuntimeException + * if there is more than one such instance. + */ + public static <C> C getOnly(final BOp op, final Class<C> class1) { + final Iterator<C> it = visitAll(op, class1); + if (!it.hasNext()) + throw new NoSuchE... [truncated message content] |
From: <mrp...@us...> - 2014-01-31 15:18:54
|
Revision: 7836 http://bigdata.svn.sourceforge.net/bigdata/?rev=7836&view=rev Author: mrpersonick Date: 2014-01-31 15:18:44 +0000 (Fri, 31 Jan 2014) Log Message: ----------- clean up some comments, add a toString() Modified Paths: -------------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java 2014-01-31 15:18:07 UTC (rev 7835) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java 2014-01-31 15:18:44 UTC (rev 7836) @@ -249,6 +249,23 @@ return numStmts; } + + public String toString() { + + return "numURIs=" + numURIs + + ", numLiterals=" + numLiterals + + ", numBNodes=" + numBNodes + + ", numStmts=" + numStmts + + ", numValues=" + numValues + + ", numSids=" + numSIDs + + ", values.length=" + (values != null ? String.valueOf(values.length) : "null") + + ", stmts.length=" + (stmts != null ? String.valueOf(stmts.length) : "null") + + ", bnodes.size()=" + (bnodes != null ? String.valueOf(bnodes.size()) : "null") + + ", distinctTermMap.size()=" + (distinctTermMap != null ? String.valueOf(distinctTermMap.size()) : "null") + + ", reifiedStmts.size()=" + (reifiedStmts != null ? String.valueOf(reifiedStmts.size()) : "null") + + ", deferredStmts.size()=" + (deferredStmts != null ? String.valueOf(deferredStmts.size()) : "null"); + + } /** * When invoked, the {@link StatementBuffer} will resolve terms against the @@ -1202,22 +1219,12 @@ if (stmt != null) { -// /* -// * Assume for now that bnodes appearing inside the terse -// * syntax without a statement attached are real bnodes, not -// * sids. 
-// */ -// final boolean tmp = this.statementIdentifiers; -// this.statementIdentifiers = false; - bnode.setStatement(valueFactory.createStatement( (BigdataResource) getDistinctTerm(stmt.getSubject(), true), (BigdataURI) getDistinctTerm(stmt.getPredicate(), true), (BigdataValue) getDistinctTerm(stmt.getObject(), true) )); -// this.statementIdentifiers = tmp; - /* * Do not "add if absent". This is not a real term, just a * composition of other terms. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-01-31 15:18:15
|
Revision: 7835 http://bigdata.svn.sourceforge.net/bigdata/?rev=7835&view=rev Author: mrpersonick Date: 2014-01-31 15:18:07 +0000 (Fri, 31 Jan 2014) Log Message: ----------- detect ungrounded (self-referential) sids Modified Paths: -------------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNodeImpl.java Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNodeImpl.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNodeImpl.java 2014-01-27 21:23:38 UTC (rev 7834) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNodeImpl.java 2014-01-31 15:18:07 UTC (rev 7835) @@ -119,17 +119,33 @@ } + /** + * Used to detect ungrounded sids (self-referential). + */ + private transient boolean selfRef = false; + @Override public IV getIV() { if (super.iv == null && sid != null) { - - if (sid.getSubject() == this || sid.getObject() == this) + +// if (sid.getSubject() == this || sid.getObject() == this) +// throw new UnificationException("illegal self-referential sid"); + + if (selfRef) { throw new UnificationException("illegal self-referential sid"); + } + + // temporarily set it to true while we get the IVs on the sid + selfRef = true; final IV s = sid.s(); final IV p = sid.p(); final IV o = sid.o(); + + // if we make it to here then we have a fully grounded sid + selfRef = false; + if (s != null && p != null && o != null) { setIV(new SidIV(new SPO(s, p, o))); } @@ -137,7 +153,7 @@ return super.iv; } - + public String toString() { return "_:" + id; @@ -220,7 +236,7 @@ this.statementIdentifier = true; this.sid = sid; } - + /** * Return the statement modeled by this blank node. */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-27 21:23:44
|
Revision: 7834 http://bigdata.svn.sourceforge.net/bigdata/?rev=7834&view=rev Author: thompsonbry Date: 2014-01-27 21:23:38 +0000 (Mon, 27 Jan 2014) Log Message: ----------- Bug fix for [1]. The root cause was an incorrect AST generated from the SPARQL parser. I have added test cases for this to TestSubqueryPatterns. With this fix, the test case in TestTickets for #806 now runs correctly. The entire AST SPARQL test suite is green. Committed to CI. See #806 (Incorrect AST generated for OPTIONAL { SELECT }) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/GroupGraphPatternBuilder.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestGroupGraphPatternBuilder.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestSubqueryPatterns.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java 2014-01-27 16:55:56 UTC (rev 7833) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java 2014-01-27 21:23:38 UTC (rev 7834) @@ -320,9 +320,8 @@ } /** - * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/806" > - * Incorrect computation of shared variables when lifting out named - * subqueries </a> + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/806> + * Incorrect AST generated for OPTIONAL { SELECT }</a> */ public void test_ticket_806() throws Exception { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/GroupGraphPatternBuilder.java =================================================================== --- 
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/GroupGraphPatternBuilder.java 2014-01-27 16:55:56 UTC (rev 7833) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/GroupGraphPatternBuilder.java 2014-01-27 21:23:38 UTC (rev 7834) @@ -273,20 +273,41 @@ graphPattern = new GroupGraphPattern(parentGP); // visit the children. - super.visit(node, null); + final Object tmp = super.visit(node, null); final JoinGroupNode joinGroup = new JoinGroupNode(); joinGroup.setOptional(true); - - @SuppressWarnings("rawtypes") - final GroupNodeBase group = graphPattern.buildGroup(joinGroup); - parentGP.add(group); + if (tmp instanceof SubqueryRoot) { + + /** + * Sub-Select + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/806> + * Incorrect computation of shared variables when lifting out named + * subqueries </a> + */ + joinGroup.addChild((SubqueryRoot) tmp); + } else { + + // GraphPattern + + @SuppressWarnings("rawtypes") + final GroupNodeBase group = graphPattern.buildGroup(joinGroup); + + assert group == joinGroup;// should be the same reference. + + } + + parentGP.add(joinGroup); + graphPattern = parentGP; return null; + } /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestGroupGraphPatternBuilder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestGroupGraphPatternBuilder.java 2014-01-27 16:55:56 UTC (rev 7833) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestGroupGraphPatternBuilder.java 2014-01-27 21:23:38 UTC (rev 7834) @@ -1298,4 +1298,53 @@ } + /** + * A unit test for an OPTIONAL wrapping a SERVICE. 
+ */ + public void test_optional_SERVICE() throws MalformedQueryException, + TokenMgrError, ParseException { + + final String serviceExpr = "service ?s { ?s ?p ?o }"; + + final String sparql = "select ?s where { optional { " + serviceExpr + + " } }"; + + final QueryRoot expected = new QueryRoot(QueryType.SELECT); + final ServiceNode service; + { + + { + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + expected.setPrefixDecls(prefixDecls); + } + + { + final ProjectionNode projection = new ProjectionNode(); + projection.addProjectionVar(new VarNode("s")); + expected.setProjection(projection); + + final JoinGroupNode whereClause = new JoinGroupNode(); + expected.setWhereClause(whereClause); + + final JoinGroupNode serviceGraph = new JoinGroupNode(); + serviceGraph.addChild(new StatementPatternNode( + new VarNode("s"), new VarNode("p"), new VarNode("o"), + null/* c */, Scope.DEFAULT_CONTEXTS)); + + service = new ServiceNode(new VarNode("s"), serviceGraph); + service.setExprImage(serviceExpr); + + final JoinGroupNode wrapperGroup = new JoinGroupNode(true/* optional */); + whereClause.addChild(wrapperGroup); + wrapperGroup.addChild(service); + } + + } + + final QueryRoot actual = parse(sparql, baseURI); + + assertSameAST(sparql, expected, actual); + + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestSubqueryPatterns.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestSubqueryPatterns.java 2014-01-27 16:55:56 UTC (rev 7833) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestSubqueryPatterns.java 2014-01-27 21:23:38 UTC (rev 7834) @@ -146,6 +146,71 @@ } /** + * Unit test for simple optional subquery without anything else in the outer + * join group. 
+ * + * <pre> + * SELECT ?s where { OPTIONAL {SELECT ?s where {?s ?p ?o}}} + * </pre> + * + * Note: This requires recursion back in through the + * {@link BigdataExprBuilder}. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/806> + * Incorrect AST generated for OPTIONAL { SELECT }</a> + */ + public void test_optional_subSelect() throws MalformedQueryException, + TokenMgrError, ParseException { + + final String sparql = "select ?s where { optional {select ?s where { ?s ?p ?o } } }"; + + final QueryRoot expected = new QueryRoot(QueryType.SELECT); + final SubqueryRoot subSelect; + { + + { + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + expected.setPrefixDecls(prefixDecls); + } + + { + final ProjectionNode projection = new ProjectionNode(); + projection.addProjectionVar(new VarNode("s")); + expected.setProjection(projection); + + final JoinGroupNode whereClause = new JoinGroupNode(); + expected.setWhereClause(whereClause); + + subSelect = new SubqueryRoot(QueryType.SELECT); +// whereClause.addChild(subSelect); + + final JoinGroupNode wrapperGroup = new JoinGroupNode(true/* optional */); + whereClause.addChild(wrapperGroup); + wrapperGroup.addChild(subSelect); + } + { + + final ProjectionNode projection2 = new ProjectionNode(); + projection2.addProjectionVar(new VarNode("s")); + subSelect.setProjection(projection2); + + final JoinGroupNode whereClause2 = new JoinGroupNode(); + subSelect.setWhereClause(whereClause2); + + whereClause2.addChild(new StatementPatternNode( + new VarNode("s"), new VarNode("p"), new VarNode("o"), + null/* c */, Scope.DEFAULT_CONTEXTS)); + + } + } + + final QueryRoot actual = parse(sparql, baseURI); + + assertSameAST(sparql, expected, actual); + + } + + /** * Unit test for simple subquery joined with a triple pattern in the outer * join group. * This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-01-27 16:56:05
|
Revision: 7833 http://bigdata.svn.sourceforge.net/bigdata/?rev=7833&view=rev Author: mrpersonick Date: 2014-01-27 16:55:56 +0000 (Mon, 27 Jan 2014) Log Message: ----------- no longer overloading the context position. found and fixed most of the test cases that rely on that functionality. there might be a few more. also got rid of the bigdata rdf/xml parser and writer. Modified Paths: -------------- branches/RDR/bigdata/src/resources/logging/log4j.properties branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/bnode/SidIV.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAll.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestStatementBuffer.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-01.ttl branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/store/TestStatementIdentifiers.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java Added Paths: ----------- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/provenance01.ttl branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sids.ttl Removed Paths: ------------- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/provenance01.rdf branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sids.rdf Modified: branches/RDR/bigdata/src/resources/logging/log4j.properties =================================================================== --- branches/RDR/bigdata/src/resources/logging/log4j.properties 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata/src/resources/logging/log4j.properties 2014-01-27 16:55:56 UTC (rev 7833) @@ -18,6 +18,8 @@ #log4j.logger.com.bigdata.rdf.rio.StatementBuffer=ALL 
#log4j.logger.com.bigdata.rdf.sail.TestProvenanceQuery=ALL +#log4j.logger.com.bigdata.rdf.sail.TestSids=ALL +#log4j.logger.com.bigdata.rdf.sail.ProxyBigdataSailTestCase=ALL # Test suite loggers. #log4j.logger.junit=INFO Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2014-01-27 16:55:56 UTC (rev 7833) @@ -108,7 +108,7 @@ final RDFParserRegistry r = RDFParserRegistry.getInstance(); - r.add(new BigdataRDFXMLParserFactory()); +// r.add(new BigdataRDFXMLParserFactory()); // // Note: This ensures that the RDFFormat for NQuads is loaded. // r.get(RDFFormat.NQUADS); @@ -120,14 +120,14 @@ } - // Ditto, but for the writer. - { - final RDFWriterRegistry r = RDFWriterRegistry.getInstance(); +// // Ditto, but for the writer. +// { +// final RDFWriterRegistry r = RDFWriterRegistry.getInstance(); +// +// r.add(new BigdataRDFXMLWriterFactory()); +// +// } - r.add(new BigdataRDFXMLWriterFactory()); - - } - // { // final PropertiesParserRegistry r = PropertiesParserRegistry.getInstance(); // Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/bnode/SidIV.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/bnode/SidIV.java 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/bnode/SidIV.java 2014-01-27 16:55:56 UTC (rev 7833) @@ -198,6 +198,7 @@ final int signum = key.length > 0 ? 
1 : 0; final BigInteger bi = new BigInteger(signum, key); return 's' + bi.toString(); +// return toString(); } /** Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java 2014-01-27 16:55:56 UTC (rev 7833) @@ -29,7 +29,7 @@ import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; +import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; @@ -406,7 +406,7 @@ /* * Process deferred statements (NOP unless using statement identifiers). */ - processDeferredStatements(); +// processDeferredStatements(); // flush anything left in the buffer. incrementalWrite(); @@ -418,272 +418,272 @@ } - /** - * Processes the {@link #deferredStmts deferred statements}. - * <p> - * When statement identifiers are enabled the processing of statements using - * blank nodes in their subject or object position must be deferred until we - * know whether or not the blank node is being used as a statement - * identifier (blank nodes are not allowed in the predicate position by the - * RDF data model). If the blank node is being used as a statement - * identifier then its {@link IV} will be assigned based on - * the {s,p,o} triple. If it is being used as a blank node, then the - * {@link IV} is assigned using the blank node ID. - * <p> - * Deferred statements are processed as follows: - * <ol> - * - * <li>Collect all deferred statements whose blank node bindings never show - * up in the context position of a statement ( - * {@link BigdataBNode#getStatementIdentifier()} is <code>false</code>). 
- * Those blank nodes are NOT statement identifiers so we insert them into - * the lexicon and the insert the collected statements as well.</li> - * - * <li>The remaining deferred statements are processed in "cliques". Each - * clique consists of all remaining deferred statements whose {s,p,o} have - * become fully defined by virtue of a blank node becoming bound as a - * statement identifier. A clique is collected by a full pass over the - * remaining deferred statements. This process repeats until no statements - * are identified (an empty clique or fixed point).</li> - * - * </ol> - * If there are remaining deferred statements then they contain cycles. This - * is an error and an exception is thrown. - * - * @todo on each {@link #flush()}, scan the deferred statements for those - * which are fully determined (bnodes are flagged as statement - * identifiers) to minimize the build up for long documents? - */ - protected void processDeferredStatements() { - - if (!statementIdentifiers || deferredStmts == null - || deferredStmts.isEmpty()) { - - // NOP. - - return; - - } - - if (log.isInfoEnabled()) - log.info("processing " + deferredStmts.size() - + " deferred statements"); - - /* - * Need to flush the terms out to the dictionary or the reification - * process will not work correctly. - */ - incrementalWrite(); - - try { - - // Note: temporary override - clear by finally{}. 
- statementIdentifiers = false; - - // stage 0 - if (reifiedStmts != null) { - - for (Map.Entry<BigdataBNodeImpl, ReifiedStmt> e : reifiedStmts.entrySet()) { - - final BigdataBNodeImpl sid = e.getKey(); - - final ReifiedStmt reifiedStmt = e.getValue(); - - if (!reifiedStmt.isFullyBound(arity)) { - - log.warn("unfinished reified stmt: " + reifiedStmt); - - continue; - - } - - final BigdataStatement stmt = valueFactory.createStatement( - reifiedStmt.getSubject(), - reifiedStmt.getPredicate(), - reifiedStmt.getObject(), - reifiedStmt.getContext(), - StatementEnum.Explicit); - - sid.setStatement(stmt); - - sid.setIV(new SidIV(new SPO(stmt))); - - if (log.isInfoEnabled()) { - log.info("reified sid conversion: sid=" + sid + ", stmt=" + stmt); - } - - } - - if (log.isInfoEnabled()) { - - for (BigdataBNodeImpl sid : reifiedStmts.keySet()) { - - log.info("sid: " + sid + ", iv=" + sid.getIV()); - - } - - } - - } - - // stage 1. - { - - final int nbefore = deferredStmts.size(); - - int n = 0; - - final Iterator<BigdataStatement> itr = deferredStmts.iterator(); - - while(itr.hasNext()) { - - final BigdataStatement stmt = itr.next(); - - if (stmt.getSubject() instanceof BNode - && ((BigdataBNode) stmt.getSubject()).isStatementIdentifier()) - continue; - - if (stmt.getObject() instanceof BNode - && ((BigdataBNode) stmt.getObject()).isStatementIdentifier()) - continue; - - if(log.isDebugEnabled()) { - log.debug("grounded: "+stmt); - } - - if (stmt.getSubject() instanceof BNode) - addTerm(stmt.getSubject()); - - if (stmt.getObject() instanceof BNode) - addTerm(stmt.getObject()); - - // fully grounded so add to the buffer. - add(stmt); - - // the statement has been handled. 
- itr.remove(); - - n++; - - } - - if (log.isInfoEnabled()) - log.info(""+ n - + " out of " - + nbefore - + " deferred statements used only blank nodes (vs statement identifiers)."); - - /* - * Flush everything in the buffer so that the blank nodes that - * are really blank nodes will have their term identifiers - * assigned. - */ - - incrementalWrite(); - - } - - // stage 2. - if(!deferredStmts.isEmpty()) { - - int nrounds = 0; - - while(true) { - - nrounds++; - - final int nbefore = deferredStmts.size(); - - final Iterator<BigdataStatement> itr = deferredStmts.iterator(); - - while(itr.hasNext()) { - - final BigdataStatement stmt = itr.next(); - - if (log.isDebugEnabled()) { - log.debug(stmt.getSubject() + ", iv=" + stmt.s()); - } - - if (stmt.getSubject() instanceof BNode - && ((BigdataBNode) stmt.getSubject()).isStatementIdentifier() - && stmt.s() == null) - continue; - - if (stmt.getObject() instanceof BNode - && ((BigdataBNode) stmt.getObject()).isStatementIdentifier() - && stmt.o() == null) - continue; - - if (log.isDebugEnabled()) { - log.debug("round="+nrounds+", grounded: "+stmt); - } - - // fully grounded so add to the buffer. - add(stmt); - - // deferred statement has been handled. - itr.remove(); - - } - - final int nafter = deferredStmts.size(); - - if (log.isInfoEnabled()) - log.info("round=" + nrounds+" : #before="+nbefore+", #after="+nafter); - - if(nafter == nbefore) { - - if (log.isInfoEnabled()) - log.info("fixed point after " + nrounds - + " rounds with " + nafter - + " ungrounded statements"); - - break; - - } - - /* - * Flush the buffer so that we can obtain the statement - * identifiers for all statements in this clique. - */ - - incrementalWrite(); - - } // next clique. 
- - final int nremaining = deferredStmts.size(); - - if (nremaining > 0) { - - if (log.isDebugEnabled()) { - - for (BigdataStatement s : deferredStmts) { - log.debug("could not ground: " + s); - } - - } - - throw new StatementCyclesException( - "" + nremaining - + " statements can not be grounded"); - - } - - - } // stage 2. - - } finally { - - // Note: restore flag! - statementIdentifiers = true; - - deferredStmts = null; - - reifiedStmts = null; - - } - - } +// /** +// * Processes the {@link #deferredStmts deferred statements}. +// * <p> +// * When statement identifiers are enabled the processing of statements using +// * blank nodes in their subject or object position must be deferred until we +// * know whether or not the blank node is being used as a statement +// * identifier (blank nodes are not allowed in the predicate position by the +// * RDF data model). If the blank node is being used as a statement +// * identifier then its {@link IV} will be assigned based on +// * the {s,p,o} triple. If it is being used as a blank node, then the +// * {@link IV} is assigned using the blank node ID. +// * <p> +// * Deferred statements are processed as follows: +// * <ol> +// * +// * <li>Collect all deferred statements whose blank node bindings never show +// * up in the context position of a statement ( +// * {@link BigdataBNode#getStatementIdentifier()} is <code>false</code>). +// * Those blank nodes are NOT statement identifiers so we insert them into +// * the lexicon and the insert the collected statements as well.</li> +// * +// * <li>The remaining deferred statements are processed in "cliques". Each +// * clique consists of all remaining deferred statements whose {s,p,o} have +// * become fully defined by virtue of a blank node becoming bound as a +// * statement identifier. A clique is collected by a full pass over the +// * remaining deferred statements. 
This process repeats until no statements +// * are identified (an empty clique or fixed point).</li> +// * +// * </ol> +// * If there are remaining deferred statements then they contain cycles. This +// * is an error and an exception is thrown. +// * +// * @todo on each {@link #flush()}, scan the deferred statements for those +// * which are fully determined (bnodes are flagged as statement +// * identifiers) to minimize the build up for long documents? +// */ +// protected void processDeferredStatements() { +// +// if (!statementIdentifiers || deferredStmts == null +// || deferredStmts.isEmpty()) { +// +// // NOP. +// +// return; +// +// } +// +// if (log.isInfoEnabled()) +// log.info("processing " + deferredStmts.size() +// + " deferred statements"); +// +// /* +// * Need to flush the terms out to the dictionary or the reification +// * process will not work correctly. +// */ +// incrementalWrite(); +// +// try { +// +// // Note: temporary override - clear by finally{}. +// statementIdentifiers = false; +// +// // stage 0 +// if (reifiedStmts != null) { +// +// for (Map.Entry<BigdataBNodeImpl, ReifiedStmt> e : reifiedStmts.entrySet()) { +// +// final BigdataBNodeImpl sid = e.getKey(); +// +// final ReifiedStmt reifiedStmt = e.getValue(); +// +// if (!reifiedStmt.isFullyBound(arity)) { +// +// log.warn("unfinished reified stmt: " + reifiedStmt); +// +// continue; +// +// } +// +// final BigdataStatement stmt = valueFactory.createStatement( +// reifiedStmt.getSubject(), +// reifiedStmt.getPredicate(), +// reifiedStmt.getObject(), +// reifiedStmt.getContext(), +// StatementEnum.Explicit); +// +// sid.setStatement(stmt); +// +// sid.setIV(new SidIV(new SPO(stmt))); +// +// if (log.isInfoEnabled()) { +// log.info("reified sid conversion: sid=" + sid + ", stmt=" + stmt); +// } +// +// } +// +// if (log.isInfoEnabled()) { +// +// for (BigdataBNodeImpl sid : reifiedStmts.keySet()) { +// +// log.info("sid: " + sid + ", iv=" + sid.getIV()); +// +// } +// +// } +// +// } 
+// +// // stage 1. +// { +// +// final int nbefore = deferredStmts.size(); +// +// int n = 0; +// +// final Iterator<BigdataStatement> itr = deferredStmts.iterator(); +// +// while(itr.hasNext()) { +// +// final BigdataStatement stmt = itr.next(); +// +// if (stmt.getSubject() instanceof BNode +// && ((BigdataBNode) stmt.getSubject()).isStatementIdentifier()) +// continue; +// +// if (stmt.getObject() instanceof BNode +// && ((BigdataBNode) stmt.getObject()).isStatementIdentifier()) +// continue; +// +// if(log.isDebugEnabled()) { +// log.debug("grounded: "+stmt); +// } +// +// if (stmt.getSubject() instanceof BNode) +// addTerm(stmt.getSubject()); +// +// if (stmt.getObject() instanceof BNode) +// addTerm(stmt.getObject()); +// +// // fully grounded so add to the buffer. +// add(stmt); +// +// // the statement has been handled. +// itr.remove(); +// +// n++; +// +// } +// +// if (log.isInfoEnabled()) +// log.info(""+ n +// + " out of " +// + nbefore +// + " deferred statements used only blank nodes (vs statement identifiers)."); +// +// /* +// * Flush everything in the buffer so that the blank nodes that +// * are really blank nodes will have their term identifiers +// * assigned. +// */ +// +// incrementalWrite(); +// +// } +// +// // stage 2. 
+// if(!deferredStmts.isEmpty()) { +// +// int nrounds = 0; +// +// while(true) { +// +// nrounds++; +// +// final int nbefore = deferredStmts.size(); +// +// final Iterator<BigdataStatement> itr = deferredStmts.iterator(); +// +// while(itr.hasNext()) { +// +// final BigdataStatement stmt = itr.next(); +// +// if (log.isDebugEnabled()) { +// log.debug(stmt.getSubject() + ", iv=" + stmt.s()); +// } +// +// if (stmt.getSubject() instanceof BNode +// && ((BigdataBNode) stmt.getSubject()).isStatementIdentifier() +// && stmt.s() == null) +// continue; +// +// if (stmt.getObject() instanceof BNode +// && ((BigdataBNode) stmt.getObject()).isStatementIdentifier() +// && stmt.o() == null) +// continue; +// +// if (log.isDebugEnabled()) { +// log.debug("round="+nrounds+", grounded: "+stmt); +// } +// +// // fully grounded so add to the buffer. +// add(stmt); +// +// // deferred statement has been handled. +// itr.remove(); +// +// } +// +// final int nafter = deferredStmts.size(); +// +// if (log.isInfoEnabled()) +// log.info("round=" + nrounds+" : #before="+nbefore+", #after="+nafter); +// +// if(nafter == nbefore) { +// +// if (log.isInfoEnabled()) +// log.info("fixed point after " + nrounds +// + " rounds with " + nafter +// + " ungrounded statements"); +// +// break; +// +// } +// +// /* +// * Flush the buffer so that we can obtain the statement +// * identifiers for all statements in this clique. +// */ +// +// incrementalWrite(); +// +// } // next clique. +// +// final int nremaining = deferredStmts.size(); +// +// if (nremaining > 0) { +// +// if (log.isDebugEnabled()) { +// +// for (BigdataStatement s : deferredStmts) { +// log.debug("could not ground: " + s); +// } +// +// } +// +// throw new StatementCyclesException( +// "" + nremaining +// + " statements can not be grounded"); +// +// } +// +// +// } // stage 2. +// +// } finally { +// +// // Note: restore flag! 
+// statementIdentifiers = true; +// +// deferredStmts = null; +// +// reifiedStmts = null; +// +// } +// +// } /** * Clears all buffered data, including the canonicalizing mapping for blank @@ -770,13 +770,19 @@ */ protected void incrementalWrite() { + /* + * Look for non-sid bnodes and add them to the values to be written + * to the database (if they haven't already been written). + */ if (bnodes != null) { for (BigdataBNode bnode : bnodes.values()) { + // sid, skip if (bnode.isStatementIdentifier()) continue; + // already written, skip if (bnode.getIV() != null) continue; @@ -973,12 +979,6 @@ final BigdataStatement stmt = stmts[i]; - /* - * Note: context position is not passed when statement identifiers - * are in use since the statement identifier is assigned based on - * the {s,p,o} triple. - */ - final SPO spo = new SPO(stmt); if (log.isDebugEnabled()) @@ -995,15 +995,6 @@ } /* - * When true, we will be handling statement identifiers. - * - * Note: this is based on the flag on the database rather than the flag - * on the StatementBuffer since the latter is temporarily overridden when - * processing deferred statements. - */ - final boolean sids = database.getStatementIdentifiers(); - - /* * Note: When handling statement identifiers, we clone tmp[] to avoid a * side-effect on its order so that we can unify the assigned statement * identifiers below. @@ -1015,77 +1006,77 @@ // final long nwritten = writeSPOs(sids ? tmp.clone() : tmp, numStmts); final long nwritten = writeSPOs(tmp.clone(), numStmts); - if (sids) { - - /* - * Unify each assigned statement identifier with the context - * position on the corresponding statement. - */ - - for (int i = 0; i < numStmts; i++) { - - final SPO spo = tmp[i]; - - final BigdataStatement stmt = stmts[i]; - - // verify that the BigdataStatement and SPO are the same triple. 
- assert stmt.s() == spo.s; - assert stmt.p() == spo.p; - assert stmt.o() == spo.o; - - final BigdataResource c = stmt.getContext(); - - if (c == null) - continue; - -// if (c instanceof URI) { +// if (sids) { // -// throw new UnificationException( -// "URI not permitted in context position when statement identifiers are enabled: " -// + stmt); +// /* +// * Unify each assigned statement identifier with the context +// * position on the corresponding statement. +// */ +// +// for (int i = 0; i < numStmts; i++) { +// +// final SPO spo = tmp[i]; +// +// final BigdataStatement stmt = stmts[i]; +// +// // verify that the BigdataStatement and SPO are the same triple. +// assert stmt.s() == spo.s; +// assert stmt.p() == spo.p; +// assert stmt.o() == spo.o; +// +// final BigdataResource c = stmt.getContext(); +// +// if (c == null) +// continue; +// +//// if (c instanceof URI) { +//// +//// throw new UnificationException( +//// "URI not permitted in context position when statement identifiers are enabled: " +//// + stmt); +//// +//// } +// +// if( c instanceof BNode) { +// +// final IV sid = spo.getStatementIdentifier(); // +// if(c.getIV() != null) { +// +// if (!sid.equals(c.getIV())) { +// +// throw new UnificationException( +// "Can not unify blankNode " +// + c +// + "(" +// + c.getIV() +// + ")" +// + " in context position with statement identifier=" +// + sid + ": " + stmt + " (" + spo +// + ")"); +// +// } +// +// } else { +// +// // assign the statement identifier. 
+// c.setIV(sid); +// +// if (log.isDebugEnabled()) { +// +// log.debug("Assigned statement identifier: " + c +// + "=" + sid); +// +// } +// +// } +// // } - - if( c instanceof BNode) { +// +// } +// +// } - final IV sid = spo.getStatementIdentifier(); - - if(c.getIV() != null) { - - if (!sid.equals(c.getIV())) { - - throw new UnificationException( - "Can not unify blankNode " - + c - + "(" - + c.getIV() - + ")" - + " in context position with statement identifier=" - + sid + ": " + stmt + " (" + spo - + ")"); - - } - - } else { - - // assign the statement identifier. - c.setIV(sid); - - if (log.isDebugEnabled()) { - - log.debug("Assigned statement identifier: " + c - + "=" + sid); - - } - - } - - } - - } - - } - // Copy the state of the isModified() flag for (int i = 0; i < numStmts; i++) { @@ -1346,6 +1337,10 @@ } else if (term instanceof BNode) { + /* + * Handle bnodes separately, in incrementalWrite(). + */ + // if (!statementIdentifiers) { // // numBNodes++; @@ -1409,102 +1404,101 @@ final BigdataStatement stmt = valueFactory.createStatement(s, p, o, c, type); - if (statementIdentifiers - && ((s instanceof BNode && ((BigdataBNode) s).getStatement() == null) -// || -// (o instanceof BNode && ((BigdataBNode) o).getStatement() == null) - )) { + /* + * Specifically looking for reification syntax: + * _:sid rdf:type Statement . + * _:sid rdf:subject <S> . + * _:sid rdf:predicate <P> . + * _:sid rdf:object <O> . 
+ */ + if (statementIdentifiers && s instanceof BNode) { + + if (equals(p, RDF_SUBJECT, RDF_PREDICATE, RDF_OBJECT)) { + + final BigdataBNodeImpl sid = (BigdataBNodeImpl) s; + + if (sid.getStatement() != null) { - /* - * When statement identifiers are enabled a statement with a - * blank node in the subject or object position must be deferred - * until the end of the source so that we determine whether it - * is being used as a statement identifier or a blank node (if - * the blank node occurs in the context position, then we know - * that it is being used as a statement identifier). - */ - - if (//s instanceof BNode && - equals(p, RDF_SUBJECT, RDF_PREDICATE, RDF_OBJECT)) { - - final BigdataBNodeImpl sid = (BigdataBNodeImpl) s; + checkSid(sid, p, o); + + log.warn("seeing a duplicate value for " + sid + ": " + p +"=" + o); + + return; + + } + + if (reifiedStmts == null) { + + reifiedStmts = new HashMap<BigdataBNodeImpl, ReifiedStmt>(); + + } + + final ReifiedStmt reifiedStmt; + if (reifiedStmts.containsKey(sid)) { + + reifiedStmt = reifiedStmts.get(sid); + + } else { + + reifiedStmt = new ReifiedStmt(); + + reifiedStmts.put(sid, reifiedStmt); + + } + + reifiedStmt.set(p, o); + + if (log.isDebugEnabled()) + log.debug("reified piece: "+stmt); + + if (reifiedStmt.isFullyBound(arity)) { + + sid.setStatement(reifiedStmt.toStatement(valueFactory)); + + reifiedStmts.remove(sid); + + } + + return; + + } else if (equals(o, RDF_STATEMENT) && equals(p, RDF_TYPE)) { - if (reifiedStmts == null) { - - reifiedStmts = new HashMap<BigdataBNodeImpl, ReifiedStmt>(); - - } + /* + * Ignore these statements. + * + * _:sid rdf:type rdf:Statement . 
+ */ + return; - final ReifiedStmt reifiedStmt; - if (reifiedStmts.containsKey(sid)) { - - reifiedStmt = reifiedStmts.get(sid); - - } else { - - reifiedStmt = new ReifiedStmt(); - - reifiedStmts.put(sid, reifiedStmt); - - } - - reifiedStmt.set(p, (BigdataValue) o); - - if (log.isDebugEnabled()) - log.debug("reified piece: "+stmt); - - if (reifiedStmt.isFullyBound(arity)) { - - sid.setStatement(reifiedStmt.toStatement(valueFactory)); - - reifiedStmts.remove(sid); - - } - - return; + } - } -// else { -// -// if (deferredStmts == null) { -// -// deferredStmts = new HashSet<BigdataStatement>(stmts.length); -// -// } -// -// deferredStmts.add(stmt); -// -// if (log.isDebugEnabled()) -// log.debug("deferred: "+stmt); -// -// } -// -// } else { - } - if (statementIdentifiers && s instanceof BNode && - equals(o, RDF_STATEMENT) && equals(p, RDF_TYPE)) { - - // ignore this statement - - return; - - } - - // add to the buffer. - stmts[numStmts++] = stmt; + // add to the buffer. + stmts[numStmts++] = stmt; +// if (c != null && statementIdentifiers && c instanceof BNode) { +// +// ((BigdataBNodeImpl) c).setStatement(stmt); +// // } - if (c != null && statementIdentifiers && c instanceof BNode) { - - ((BigdataBNodeImpl) c).setStatement(stmt); - - } - } + private void checkSid(final BigdataBNode sid, final URI p, final Value o) { + + final BigdataStatement stmt = sid.getStatement(); + + if ((p == RDF_SUBJECT && stmt.getSubject() != o) || + (p == RDF_PREDICATE && stmt.getPredicate() != o) || + (p == RDF_OBJECT && stmt.getObject() != o)) { + + throw new UnificationException("sid cannot refer to multiple statements"); + + } + + } + private boolean equals(final BigdataValue v1, final BigdataValue... 
v2) { if (v2.length == 1) { Modified: branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAll.java =================================================================== --- branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAll.java 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAll.java 2014-01-27 16:55:56 UTC (rev 7833) @@ -67,14 +67,14 @@ */ suite.addTestSuite(TestLoadAndVerify.class); - /* - * Correctness tests when SIDs are enabled and for blank node handling - * using StatementBuffer and explicitly inserting specific triples (no - * parsing). The RDF/XML interchange tests serialize the hand loaded - * data and verify that it can be parsed and that the same graph is - * obtained. - */ - suite.addTestSuite(TestRDFXMLInterchangeWithStatementIdentifiers.class); +// /* +// * Correctness tests when SIDs are enabled and for blank node handling +// * using StatementBuffer and explicitly inserting specific triples (no +// * parsing). The RDF/XML interchange tests serialize the hand loaded +// * data and verify that it can be parsed and that the same graph is +// * obtained. +// */ +// suite.addTestSuite(TestRDFXMLInterchangeWithStatementIdentifiers.class); /* * Test suite for "SIDS" support for NTRIPLES data. 
This test targets a Modified: branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestStatementBuffer.java =================================================================== --- branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestStatementBuffer.java 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestStatementBuffer.java 2014-01-27 16:55:56 UTC (rev 7833) @@ -135,7 +135,7 @@ buffer.handleStatement(s1, p1, o1, c1, StatementEnum.Explicit); - assertEquals(7, buffer.numURIs); + assertEquals(8, buffer.numURIs); assertEquals(0, buffer.numLiterals); assertEquals(0, buffer.numBNodes); assertEquals(1, buffer.numStmts); @@ -151,7 +151,7 @@ buffer.handleStatement(s2, p2, o2, c2, StatementEnum.Explicit); - assertEquals(8, buffer.numURIs); // only 4 since one is duplicate. + assertEquals(9, buffer.numURIs); // only 4 since one is duplicate. assertEquals(1, buffer.numLiterals); assertEquals(0, buffer.numBNodes); assertEquals(2, buffer.numStmts); @@ -167,7 +167,7 @@ buffer.handleStatement(s3, p3, o3, c3, StatementEnum.Explicit); - assertEquals(8, buffer.numURIs); + assertEquals(9, buffer.numURIs); assertEquals(1, buffer.numLiterals); assertEquals(0, buffer.numBNodes); assertEquals(3, buffer.numStmts); @@ -178,7 +178,7 @@ buffer.handleStatement(s3, p3, o3, c3, StatementEnum.Explicit); - assertEquals(8, buffer.numURIs); + assertEquals(9, buffer.numURIs); assertEquals(1, buffer.numLiterals); assertEquals(0, buffer.numBNodes); assertEquals(4, buffer.numStmts); Modified: branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-01.ttl =================================================================== --- branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-01.ttl 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-01.ttl 2014-01-27 16:55:56 UTC (rev 7833) @@ -27,13 +27,6 @@ _:s1 dc:source <http://hr.example.com/employees#bob> ; 
dc:created "2012-02-05T12:34:00Z"^^xsd:dateTime . -_:s1 rdf:subject bd:alice . -_:s1 rdf:predicate foaf:mbox . -_:s1 rdf:object <mailto:alice@work> . -_:s1 rdf:type rdf:Statement . -_:s1 dc:source <http://hr.example.com/employees#bob> ; - dc:created "2012-02-05T12:34:00Z"^^xsd:dateTime . - # Terse #<<bd:alice foaf:knows bd:bob>> # dc:source re:engine_1; Modified: branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/store/TestStatementIdentifiers.java =================================================================== --- branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/store/TestStatementIdentifiers.java 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/store/TestStatementIdentifiers.java 2014-01-27 16:55:56 UTC (rev 7833) @@ -297,50 +297,50 @@ } - final BigdataStatementIterator itr = store.getStatements(null, null, null); +// final BigdataStatementIterator itr = store.getStatements(null, null, null); +// +// try { +// +// final Writer w = new StringWriter(); +// +//// RDFXMLWriter rdfWriter = new RDFXMLWriter(w); +// +// final RDFWriterFactory writerFactory = RDFWriterRegistry +// .getInstance().get(RDFFormat.RDFXML); +// +// assertNotNull(writerFactory); +// +// if (!(writerFactory instanceof BigdataRDFXMLWriterFactory)) +// fail("Expecting " + BigdataRDFXMLWriterFactory.class + " not " +// + writerFactory.getClass()); +// +// final RDFWriter rdfWriter = writerFactory.getWriter(w); +// +// rdfWriter.startRDF(); +// +// while(itr.hasNext()) { +// +// final Statement stmt = itr.next(); +// +// rdfWriter.handleStatement(stmt); +// +// } +// +// rdfWriter.endRDF(); +// +// if (log.isInfoEnabled()) +// log.info(w.toString()); +// +// } catch(Exception ex) { +// +// throw new RuntimeException(ex); +// +// } finally { +// +// itr.close(); +// +// } - try { - - final Writer w = new StringWriter(); - -// RDFXMLWriter rdfWriter = new RDFXMLWriter(w); - - final RDFWriterFactory writerFactory = RDFWriterRegistry - 
.getInstance().get(RDFFormat.RDFXML); - - assertNotNull(writerFactory); - - if (!(writerFactory instanceof BigdataRDFXMLWriterFactory)) - fail("Expecting " + BigdataRDFXMLWriterFactory.class + " not " - + writerFactory.getClass()); - - final RDFWriter rdfWriter = writerFactory.getWriter(w); - - rdfWriter.startRDF(); - - while(itr.hasNext()) { - - final Statement stmt = itr.next(); - - rdfWriter.handleStatement(stmt); - - } - - rdfWriter.endRDF(); - - if (log.isInfoEnabled()) - log.info(w.toString()); - - } catch(Exception ex) { - - throw new RuntimeException(ex); - - } finally { - - itr.close(); - - } - /* * Verify after restart. */ @@ -768,7 +768,10 @@ StatementBuffer buf = new StatementBuffer(store, 100/* capacity */); // statement about itself is a cycle. - buf.add(sid1, rdfType, A, sid1); + buf.add(sid1, RDF.TYPE, A); + buf.add(sid1, RDF.SUBJECT, sid1); + buf.add(sid1, RDF.PREDICATE, RDF.TYPE); + buf.add(sid1, RDF.OBJECT, A); /* * Flush to the database, resolving statement identifiers as @@ -830,16 +833,23 @@ { StatementBuffer buf = new StatementBuffer(store, 100/* capacity */); - // a cycle with a period of one. - buf.add(sid2, rdfType, B, sid1); - buf.add(sid1, rdfType, B, sid2); - /* * Flush to the database, resolving statement identifiers as * necessary. */ try { + // a cycle with a period of one. + buf.add(sid2, RDF.TYPE, B); + buf.add(sid1, RDF.SUBJECT, sid2); + buf.add(sid1, RDF.PREDICATE, RDF.TYPE); + buf.add(sid1, RDF.OBJECT, B); + + buf.add(sid1, RDF.TYPE, B); + buf.add(sid2, RDF.SUBJECT, sid1); + buf.add(sid2, RDF.PREDICATE, RDF.TYPE); + buf.add(sid2, RDF.OBJECT, B); + buf.flush(); fail("Expecting: "+UnificationException.class); @@ -888,16 +898,23 @@ StatementBuffer buf = new StatementBuffer(store, 100/* capacity */); - // same blank node in both two distinct statement is an error. - buf.add(A, rdfType, C, sid1); - buf.add(B, rdfType, C, sid1); - /* * Flush to the database, resolving statement identifiers as * necessary. 
*/ try { + // same blank node in both two distinct statement is an error. + buf.add(A, RDF.TYPE, C); + buf.add(sid1, RDF.SUBJECT, A); + buf.add(sid1, RDF.PREDICATE, RDF.TYPE); + buf.add(sid1, RDF.OBJECT, C); + + buf.add(B, RDF.TYPE, C); + buf.add(sid1, RDF.SUBJECT, B); + buf.add(sid1, RDF.PREDICATE, RDF.TYPE); + buf.add(sid1, RDF.OBJECT, C); + buf.flush(); fail("Expecting: "+UnificationException.class); Modified: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java 2014-01-27 16:55:56 UTC (rev 7833) @@ -37,6 +37,7 @@ import junit.extensions.proxy.IProxyTest; import junit.framework.Test; +import org.apache.log4j.Logger; import org.openrdf.model.Resource; import org.openrdf.query.Binding; import org.openrdf.query.BindingSet; @@ -68,6 +69,8 @@ implements IProxyTest { +// protected final transient static Logger log = Logger.getLogger(ProxyBigdataSailTestCase.class); + public ProxyBigdataSailTestCase() {} public ProxyBigdataSailTestCase(String name){super(name);} Modified: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java 2014-01-27 16:55:56 UTC (rev 7833) @@ -100,8 +100,8 @@ final DataLoader dataLoader = sail.database.getDataLoader(); dataLoader.loadData( - "bigdata-sails/src/test/com/bigdata/rdf/sail/provenance01.rdf", - ""/*baseURL*/, RDFFormat.RDFXML); + "bigdata-sails/src/test/com/bigdata/rdf/sail/provenance01.ttl", + ""/*baseURL*/, RDFFormat.TURTLE); 
} Modified: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java 2014-01-27 16:55:56 UTC (rev 7833) @@ -28,12 +28,9 @@ import java.util.Properties; import org.apache.log4j.Logger; -import org.openrdf.model.Statement; import org.openrdf.model.URI; -import org.openrdf.model.ValueFactory; import org.openrdf.model.impl.URIImpl; import org.openrdf.model.vocabulary.RDF; -import org.openrdf.model.vocabulary.RDFS; import org.openrdf.query.Binding; import org.openrdf.query.BindingSet; import org.openrdf.query.QueryLanguage; @@ -43,18 +40,11 @@ import org.openrdf.rio.RDFFormat; import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.model.BigdataURI; import com.bigdata.rdf.model.BigdataValueFactory; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.rdf.store.BD; -import com.bigdata.rdf.store.BigdataStatementIterator; import com.bigdata.rdf.vocab.NoVocabulary; -import com.bigdata.relation.accesspath.IAccessPath; -import cutthecrap.utils.striterators.ICloseableIterator; - /** * Test case for reverse lookup from SID to statement. * @@ -108,7 +98,7 @@ cxn.setAutoCommit(false); - cxn.add(getClass().getResourceAsStream("sids.rdf"), "", RDFFormat.RDFXML); + cxn.add(getClass().getResourceAsStream("sids.ttl"), "", RDFFormat.TURTLE); /* * Note: The either flush() or commit() is required to flush the @@ -132,9 +122,10 @@ "PREFIX myns: <http://mynamespace.com#> " + "SELECT distinct ?s ?p ?o " + " { " + - " ?sid myns:creator <http://1.com> . " + + " <<"+(s == null ? "?s" : "<"+s+">")+" ?p ?o>> myns:creator <http://1.com> . " + +// " ?sid myns:creator <http://1.com> . 
" + // " graph ?sid { ?s ?p ?o } " + - " graph ?sid { "+(s == null ? "?s" : "<"+s+">")+" ?p ?o } " + +// " graph ?sid { "+(s == null ? "?s" : "<"+s+">")+" ?p ?o } " + " }"; final TupleQuery tupleQuery = @@ -206,7 +197,7 @@ cxn.setAutoCommit(false); - final ValueFactory vf = sail.getValueFactory(); + final BigdataValueFactory vf = (BigdataValueFactory) sail.getValueFactory(); final URI host1 = vf.createURI("http://localhost/host1"); final URI host = vf.createURI("http://domainnamespace.com/host#Host"); @@ -239,27 +230,33 @@ // cxn.add(swtch2, RDF.TYPE, swtch, sid5); // cxn.add(sid5, creator, src2); - final Statement s1 = vf.createStatement(host1, RDF.TYPE, host, vf.createBNode()); - final Statement s2 = vf.createStatement(host1, connectedTo, swtch1, vf.createBNode()); - final Statement s3 = vf.createStatement(host1, connectedTo, swtch2, vf.createBNode()); - final Statement s4 = vf.createStatement(swtch1, RDF.TYPE, swtch, vf.createBNode()); - final Statement s5 = vf.createStatement(swtch2, RDF.TYPE, swtch, vf.createBNode()); + final BigdataStatement s1 = vf.createStatement(host1, RDF.TYPE, host, vf.createBNode()); + final BigdataStatement s2 = vf.createStatement(host1, connectedTo, swtch1, vf.createBNode()); + final BigdataStatement s3 = vf.createStatement(host1, connectedTo, swtch2, vf.createBNode()); + final BigdataStatement s4 = vf.createStatement(swtch1, RDF.TYPE, swtch, vf.createBNode()); + final BigdataStatement s5 = vf.createStatement(swtch2, RDF.TYPE, swtch, vf.createBNode()); + final BigdataBNode sid1 = vf.createBNode(s1); + final BigdataBNode sid2 = vf.createBNode(s2); + final BigdataBNode sid3 = vf.createBNode(s3); + final BigdataBNode sid4 = vf.createBNode(s4); + final BigdataBNode sid5 = vf.createBNode(s5); + cxn.add(s1); - cxn.add(s1.getContext(), creator, src1); - cxn.add(s1.getContext(), creator, src2); + cxn.add(sid1, creator, src1); + cxn.add(sid1, creator, src2); cxn.add(s2); - cxn.add(s2.getContext(), creator, src1); + cxn.add(sid2, creator, 
src1); cxn.add(s3); - cxn.add(s3.getContext(), creator, src2); + cxn.add(sid3, creator, src2); cxn.add(s4); - cxn.add(s4.getContext(), creator, src1); + cxn.add(sid4, creator, src1); cxn.add(s5); - cxn.add(s5.getContext(), creator, src2); + cxn.add(sid5, creator, src2); cxn.flush();//commit(); @@ -278,9 +275,10 @@ "PREFIX myns: <http://mynamespace.com#> " + "SELECT distinct ?s ?p ?o " + " { " + - " ?sid myns:creator <http://1.com> . " + -// " graph ?sid { ?s ?p ?o } " + - " graph ?sid { "+(s == null ? "?s" : "<"+s+">")+" ?p ?o } " + + " <<"+(s == null ? "?s" : "<"+s+">")+" ?p ?o>> myns:creator <http://1.com> . " + +// " ?sid myns:creator <http://1.com> . " + +//// " graph ?sid { ?s ?p ?o } " + +// " graph ?sid { "+(s == null ? "?s" : "<"+s+">")+" ?p ?o } " + " }"; final TupleQuery tupleQuery = Deleted: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/provenance01.rdf =================================================================== (Binary files differ) Copied: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/provenance01.ttl (from rev 7809, branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/provenance01.rdf) =================================================================== (Binary files differ) Property changes on: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/provenance01.ttl ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Deleted: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sids.rdf =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sids.rdf 2014-01-27 16:25:06 UTC (rev 7832) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sids.rdf 2014-01-27 16:55:56 UTC (rev 7833) @@ -1,82 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> - -<rdf:RDF - - xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" - - xmlns:bigdata="http://www.bigdata.com/rdf#"> - - - 
-<rdf:Description rdf:about="http://localhost/host1"> - - <rdf:type bigdata:sid="S195" bigdata:statementType="Explicit" rdf:resource="http://domainnamespace.com/host#Host"/> - - <connectedTo xmlns="http://mynamespace.com#" bigdata:sid="S199" bigdata:statementType="Explicit" rdf:resource="http://localhost/switch1"/> - - <connectedTo xmlns="http://mynamespace.com#" bigdata:sid="S227" bigdata:statementType="Explicit" rdf:resource="http://localhost/switch2"/> - -</rdf:Description> - - - -<rdf:Description rdf:about="http://localhost/switch1"> - - <rdf:type bigdata:sid="S203" bigdata:statementType="Explicit" rdf:resource="http://domainnamespace.com/san#Switch"/> - -</rdf:Description> - - - -<rdf:Description rdf:nodeID="S195"> - - <creator xmlns="http://mynamespace.com#" bigdata:sid="S211" bigdata:statementType="Explicit" rdf:resource="http://1.com"/> - - <creator xmlns="http://mynamespace.com#" bigdata:sid="S239" bigdata:statementType="Explicit" rdf:resource="http://2.com"/> - -</rdf:Description> - - - -<rdf:Description rdf:nodeID="S199"> - - <creator xmlns="http://mynamespace.com#" bigdata:sid="S215" bigdata:statementType="Explicit" rdf:resource="http://1.com"/> - -</rdf:Description> - - - -<rdf:Description rdf:nodeID="S203"> - - <creator xmlns="http://mynamespace.com#" bigdata:sid="S219" bigdata:statementType="Explicit" rdf:resource="http://1.com"/> - -</rdf:Description> - - - -<rdf:Description rdf:about="http://localhost/switch2"> - - <rdf:type bigdata:sid="S231" bigdata:statementType="Explicit" rdf:resource="http://domainnamespace.com/san#Switch"/> - -</rdf:Description> - - - -<rdf:Description rdf:nodeID="S227"> - - <creator xmlns="http://mynamespace.com#" bigdata:sid="S243" bigdata:statementType="Explicit" rdf:resource="http://2.com"/> - -</rdf:Description> - - - -<rdf:Description rdf:nodeID="S231"> - - <creator xmlns="http://mynamespace.com#" bigdata:sid="S247" bigdata:statementType="Explicit" rdf:resource="http://2.com"/> - -</rdf:Description> - - - -</rdf:RDF> - 
Copied: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sids.ttl (from rev 7809, branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sids.rdf) =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sids.ttl (rev 0) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/sids.ttl 2014-01-27 16:55:56 UTC (rev 7833) @@ -0,0 +1,19 @@ +@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . +@prefix bigdata: <http://www.bigdata.com/rdf#> . +@prefix myns: <http://mynamespace.com#> . + +<http://localhost/host1> rdf:type <http://domainnamespace.com/host#Host> . +<<<http://localhost/host1> rdf:type <http://domainnamespace.com/host#Host>>> myns:creator <http://1.com> . +<<<http://localhost/host1> rdf:type <http://domainnamespace.com/host#Host>>> myns:creator <http://2.com> . + +<http://localhost/host1> myns:connectedTo <http://localhost/switch1> . +<<<http://localhost/host1> myns:connectedTo <http://localhost/switch1>>> myns:creator <http://1.com> . + +<http://localhost/host1> myns:... [truncated message content] |
From: <tho...@us...> - 2014-01-27 16:25:13
|
Revision: 7832 http://bigdata.svn.sourceforge.net/bigdata/?rev=7832&view=rev Author: thompsonbry Date: 2014-01-27 16:25:06 +0000 (Mon, 27 Jan 2014) Log Message: ----------- A review of the IRabaCoder implementations revealed that the new FrontCodedRabaCoderDupKeys class lacked an explicit serialVersionUID field. That is fixed in in this commit. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoderDupKeys.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoderDupKeys.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoderDupKeys.java 2014-01-27 16:20:42 UTC (rev 7831) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoderDupKeys.java 2014-01-27 16:25:06 UTC (rev 7832) @@ -35,6 +35,8 @@ */ public static final transient FrontCodedRabaCoderDupKeys INSTANCE = new FrontCodedRabaCoderDupKeys(); + private static final long serialVersionUID = 1L; + protected transient static final int DEFAULT_RATIO = 8; public FrontCodedRabaCoderDupKeys(final int ratio) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-27 16:20:51
|
Revision: 7831 http://bigdata.svn.sourceforge.net/bigdata/?rev=7831&view=rev Author: thompsonbry Date: 2014-01-27 16:20:42 +0000 (Mon, 27 Jan 2014) Log Message: ----------- Fix to the implicit serialVersionUID for the FixedLengthValueRabaCoder. See #763 (Stochastic behavior in Analytic query mode). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FixedLengthValueRabaCoder.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FixedLengthValueRabaCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FixedLengthValueRabaCoder.java 2014-01-27 16:09:26 UTC (rev 7830) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FixedLengthValueRabaCoder.java 2014-01-27 16:20:42 UTC (rev 7831) @@ -76,6 +76,16 @@ */ public class FixedLengthValueRabaCoder implements IRabaCoder, Externalizable { + /** + * This is the historical implicit value. It has been made into an explicit + * value since the {@link IRabaCoder} API change to support duplicate keys + * for the HTree caused a change in the implict computed value. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/763" > + * Stochastic Results With Analytic Query Mode </a> + */ + private static final long serialVersionUID = 5549200745262968226L; + private static final byte VERSION0 = 0x00; /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-27 16:09:39
|
Revision: 7830 http://bigdata.svn.sourceforge.net/bigdata/?rev=7830&view=rev Author: thompsonbry Date: 2014-01-27 16:09:26 +0000 (Mon, 27 Jan 2014) Log Message: ----------- Fix for [1]. This incorporates a fix for the stochastic behavior of the analytic query mode whose root cause was the failure of the leaf keys coder for the HTree to support duplicate keys. Committed to CI for feedback. I will also re-run the govtrack queries to establish a new baseline and assess the performance impact relative to the last released code. [1] https://sourceforge.net/apps/trac/bigdata/ticket/763 (Stochastic results in Analytic query mode). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/.classpath branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/AbstractKeyBuffer.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/AbstractRaba.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/ConditionalRabaCoder.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/EmptyRaba.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/IRaba.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/MutableKeysRaba.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/MutableValueBuffer.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/AbstractCodedRaba.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/CanonicalHuffmanRabaCoder.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/EmptyRabaValueCoder.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FixedLengthValueRabaCoder.java 
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoder.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/IRabaCoder.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/SimpleRabaCoder.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/DirectoryPage.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/NodeSerializer.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba/MutableKeyBuffer.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba/MutableValueBuffer.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/btree/raba/codec/MutableRabaCoder.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/htree/AbstractHTreeTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/htree/TestDuplicates.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/htree/TestHTreeWithMemStore.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/htree/TestIncrementalWrite.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/FastRDFValueCoder2.java branches/BIGDATA_RELEASE_1_3_0/build.properties branches/BIGDATA_RELEASE_1_3_0/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/CustomByteArrayFrontCodedList.java branches/BIGDATA_RELEASE_1_3_0/pom.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/lgpl-utils-1.0.7-270114.jar branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoderDupKeys.java Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/lgpl-utils-1.0.7-140114.jar Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-01-27 16:09:26 UTC (rev 
7830) @@ -32,7 +32,7 @@ <classpathentry kind="src" path="bigdata-gas/src/java"/> <classpathentry kind="src" path="bigdata-gas/src/test"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/dsi-utils-1.0.6-020610.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/lgpl-utils-1.0.7-140114.jar"/> + <classpathentry kind="lib" path="bigdata/lib/lgpl-utils-1.0.7-270114.jar"/> <classpathentry kind="lib" path="bigdata-jini/lib/apache/zookeeper-3.3.3.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-continuation-7.2.2.v20101205.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-http-7.2.2.v20101205.jar"/> Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/lgpl-utils-1.0.7-140114.jar =================================================================== (Binary files differ) Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/lgpl-utils-1.0.7-270114.jar =================================================================== (Binary files differ) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/lgpl-utils-1.0.7-270114.jar ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -59,6 +59,7 @@ import com.bigdata.btree.keys.ASCIIKeyBuilderFactory; import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.raba.codec.FrontCodedRabaCoder; +import com.bigdata.btree.raba.codec.FrontCodedRabaCoderDupKeys; import com.bigdata.btree.raba.codec.SimpleRabaCoder; import 
com.bigdata.counters.CAT; import com.bigdata.htree.HTree; @@ -486,7 +487,8 @@ @SuppressWarnings("rawtypes") final ITupleSerializer<?, ?> tupleSer = new DefaultTupleSerializer( new ASCIIKeyBuilderFactory(Bytes.SIZEOF_INT), - new FrontCodedRabaCoder(ratio),// keys : TODO Optimize for int32! + // new FrontCodedRabaCoder(ratio),// keys : TODO Optimize for int32! + new FrontCodedRabaCoderDupKeys(ratio),// keys : TODO Optimize for int32! new SimpleRabaCoder() // vals ); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -48,6 +48,7 @@ import com.bigdata.btree.raba.codec.CanonicalHuffmanRabaCoder; import com.bigdata.btree.raba.codec.FrontCodedRabaCoder; import com.bigdata.btree.raba.codec.FrontCodedRabaCoder.DefaultFrontCodedRabaCoder; +import com.bigdata.btree.raba.codec.FrontCodedRabaCoderDupKeys; import com.bigdata.btree.raba.codec.IRabaCoder; import com.bigdata.btree.view.FusedView; import com.bigdata.config.Configuration; @@ -2096,9 +2097,16 @@ // this.addrSer = AddressSerializer.INSTANCE; // this.nodeKeySer = PrefixSerializer.INSTANCE; + final Class keyRabaCoder; + if (this instanceof HTreeIndexMetadata) { + keyRabaCoder = FrontCodedRabaCoderDupKeys.class; + } else { + keyRabaCoder = DefaultFrontCodedRabaCoder.class; + } + this.nodeKeysCoder = newInstance(getProperty(indexManager, properties, namespace, Options.NODE_KEYS_CODER, - DefaultFrontCodedRabaCoder.class.getName()), IRabaCoder.class); + keyRabaCoder.getName()), IRabaCoder.class); // this.tupleSer = DefaultTupleSerializer.newInstance(); { @@ -2116,7 +2124,7 @@ final IRabaCoder leafKeysCoder = newInstance(getProperty( indexManager, properties, namespace, 
- Options.LEAF_KEYS_CODER, DefaultFrontCodedRabaCoder.class + Options.LEAF_KEYS_CODER, keyRabaCoder .getName()), IRabaCoder.class); final IRabaCoder valuesCoder = newInstance(getProperty( Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/AbstractKeyBuffer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/AbstractKeyBuffer.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/AbstractKeyBuffer.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -76,20 +76,24 @@ */ abstract protected int _binarySearch(final int searchKeyOffset, final byte[] searchKey); + @Override public Iterator<byte[]> iterator() { return new Iterator<byte[]>() { int i = 0; + @Override public boolean hasNext() { return i < size(); } + @Override public byte[] next() { return get(i++); } + @Override public void remove() { throw new UnsupportedOperationException(); } @@ -107,5 +111,5 @@ * The length of the leading prefix shared by all keys. 
*/ abstract public int getPrefixLength(); - + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/AbstractRaba.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/AbstractRaba.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/AbstractRaba.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -124,25 +124,29 @@ this.a = a; } - + + @Override final public int size() { return (toIndex - fromIndex); } + @Override final public boolean isEmpty() { return toIndex == fromIndex; } + @Override final public boolean isFull() { return size() == capacity(); } + @Override final public int capacity() { return capacity; @@ -163,6 +167,7 @@ } + @Override final public byte[] get(final int index) { assert rangeCheck(index); @@ -171,6 +176,7 @@ } + @Override final public int length(final int index) { assert rangeCheck(index); @@ -184,6 +190,7 @@ } + @Override final public boolean isNull(final int index) { assert rangeCheck(index); @@ -192,6 +199,7 @@ } + @Override final public int copy(final int index, final OutputStream out) { assert rangeCheck(index); @@ -215,18 +223,21 @@ } + @Override final public Iterator<byte[]> iterator() { return new Iterator<byte[]>() { int i = fromIndex; + @Override public boolean hasNext() { return i < toIndex; } + @Override public byte[] next() { if (!hasNext()) @@ -236,6 +247,7 @@ } + @Override public void remove() { if (isReadOnly()) @@ -294,6 +306,7 @@ } + @Override public void set(final int index, final byte[] key) { assertNotReadOnly(); @@ -306,6 +319,7 @@ } + @Override public int add(final byte[] key) { assertNotReadOnly(); @@ -322,6 +336,7 @@ } + @Override public int add(final byte[] key, final int off, final int len) { assertNotReadOnly(); @@ -346,6 +361,7 @@ } + @Override public int add(final DataInput in, final int len) throws IOException { assertNotReadOnly(); @@ -362,6 
+378,7 @@ } + @Override public int search(final byte[] searchKey) { if (!isKeys()) { @@ -375,6 +392,7 @@ } + @Override public String toString() { return toString(this); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/ConditionalRabaCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/ConditionalRabaCoder.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/ConditionalRabaCoder.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -74,18 +74,27 @@ } + @Override final public boolean isKeyCoder() { return smallCoder.isKeyCoder() && bigCoder.isKeyCoder(); } + @Override final public boolean isValueCoder() { return smallCoder.isValueCoder() && bigCoder.isValueCoder(); } + @Override + public boolean isDuplicateKeys() { + + return smallCoder.isDuplicateKeys() && bigCoder.isDuplicateKeys(); + + } + /** * De-serialization ctor. */ @@ -123,6 +132,7 @@ } + @Override public ICodedRaba decode(final AbstractFixedByteArrayBuffer data) { final boolean isSmall = data.getByte(0) == 1 ? 
true : false; @@ -146,6 +156,7 @@ } + @Override public ICodedRaba encodeLive(final IRaba raba, final DataOutputBuffer buf) { final int size = raba.size(); @@ -175,6 +186,7 @@ } + @Override public AbstractFixedByteArrayBuffer encode(final IRaba raba, final DataOutputBuffer buf) { @@ -201,6 +213,7 @@ } + @Override public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { @@ -220,6 +233,7 @@ } + @Override public void writeExternal(final ObjectOutput out) throws IOException { out.writeByte(VERSION0); @@ -260,72 +274,89 @@ } + @Override public AbstractFixedByteArrayBuffer data() { return data; } + @Override public int add(byte[] value, int off, int len) { return delegate.add(value, off, len); } + @Override public int add(byte[] a) { return delegate.add(a); } + @Override public int add(DataInput in, int len) throws IOException { return delegate.add(in, len); } + @Override public int capacity() { return delegate.capacity(); } - public int copy(int index, OutputStream os) { + @Override + public int copy(final int index, final OutputStream os) { return delegate.copy(index, os); } - public byte[] get(int index) { + @Override + public byte[] get(final int index) { return delegate.get(index); } + @Override public boolean isEmpty() { return delegate.isEmpty(); } + @Override public boolean isFull() { return delegate.isFull(); } + @Override public boolean isKeys() { return delegate.isKeys(); } - public boolean isNull(int index) { + @Override + public boolean isNull(final int index) { return delegate.isNull(index); } + @Override public boolean isReadOnly() { return delegate.isReadOnly(); } + @Override public Iterator<byte[]> iterator() { return delegate.iterator(); } - public int length(int index) { + @Override + public int length(final int index) { return delegate.length(index); } - public int search(byte[] searchKey) { + @Override + public int search(final byte[] searchKey) { return delegate.search(searchKey); } - public void set(int index, 
byte[] a) { + @Override + public void set(final int index, final byte[] a) { delegate.set(index, a); } + @Override public int size() { return delegate.size(); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/EmptyRaba.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/EmptyRaba.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/EmptyRaba.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -50,6 +50,7 @@ } + @Override final public boolean isKeys() { return true; @@ -78,6 +79,7 @@ } + @Override final public boolean isKeys() { return false; @@ -92,75 +94,92 @@ public EmptyRaba() { } - + + @Override final public int capacity() { return 0; } + @Override final public boolean isEmpty() { return true; } + @Override final public boolean isFull() { return true; } + @Override final public int size() { return 0; } + @Override final public boolean isReadOnly() { return true; } + @Override final public boolean isNull(int index) { throw new IndexOutOfBoundsException(); } + @Override final public int length(int index) { throw new IndexOutOfBoundsException(); } + @Override final public byte[] get(int index) { throw new IndexOutOfBoundsException(); } + @Override final public int copy(int index, OutputStream os) { throw new IndexOutOfBoundsException(); } + @Override @SuppressWarnings("unchecked") final public Iterator<byte[]> iterator() { return EmptyIterator.DEFAULT; } + @Override final public int search(byte[] searchKey) { if (isKeys()) return -1; throw new UnsupportedOperationException(); } + @Override final public void set(int index, byte[] a) { throw new UnsupportedOperationException(); } + @Override final public int add(byte[] a) { throw new UnsupportedOperationException(); } + @Override final public int add(byte[] value, int off, int len) { throw new UnsupportedOperationException(); } + 
@Override final public int add(DataInput in, int len) throws IOException { throw new UnsupportedOperationException(); } + @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { // NOP } + @Override public void writeExternal(ObjectOutput out) throws IOException { // NOP } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/IRaba.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/IRaba.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/IRaba.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -64,7 +64,6 @@ * support {@link #search(byte[])} and <code>null</code>s are allowed. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * * @todo consider discarding all of the {@link #add(byte[])} methods. It is * typically easier and more convenient to directly manage the existing @@ -78,7 +77,7 @@ /** * Return <code>true</code> if this implementation is read-only. */ - public boolean isReadOnly(); + boolean isReadOnly(); /** * When <code>true</code> the {@link IRaba} supports search and elements are @@ -96,6 +95,19 @@ */ boolean isKeys(); + /* + * TODO This could be added to differentiate between IRaba implementations + * that do / do not support duplicate keys. The ones used with the HTree do. + * The rest do not. + */ +// /** +// * When <code>true</code>, then {@link IRaba} supports duplicate keys. +// * +// * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/763" > +// * Stochastic Results With Analytic Query Mode </a> +// */ +// boolean isDuplicateKeys(); + /** * The capacity of the logical byte[][]. * @@ -194,6 +206,7 @@ * <code>null</code>, then the iterator will report a <code>null</code> for * that element. 
*/ + @Override public Iterator<byte[]> iterator(); /* Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/MutableKeysRaba.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/MutableKeysRaba.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/MutableKeysRaba.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -42,6 +42,7 @@ /** * No - this view is mutable. */ + @Override public boolean isReadOnly() { return false; @@ -51,12 +52,13 @@ /** * Yes. */ + @Override final public boolean isKeys() { return true; } - + /** * Create a view of a byte[][]. All elements in the array are visible in the * view. @@ -113,6 +115,7 @@ } + @Override public MutableKeysRaba resize(final int n) { return (MutableKeysRaba) super.resize(n); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/MutableValueBuffer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/MutableValueBuffer.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/MutableValueBuffer.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -63,6 +63,7 @@ /** * For B+Tree values. 
*/ + @Override final public boolean isKeys() { return false; @@ -142,24 +143,28 @@ } + @Override final public int size() { return nvalues; } + @Override final public boolean isEmpty() { return nvalues == 0; } + @Override final public boolean isFull() { return nvalues == values.length; } + @Override final public int capacity() { return values.length; @@ -188,6 +193,7 @@ } + @Override final public int length(final int index) { assert rangeCheck(index); @@ -201,6 +207,7 @@ } + @Override final public boolean isNull(final int index) { assert rangeCheck(index); @@ -209,6 +216,7 @@ } + @Override final public int copy(final int index, final OutputStream out) { assert rangeCheck(index); @@ -232,18 +240,21 @@ } + @Override final public Iterator<byte[]> iterator() { return new Iterator<byte[]>() { int i = 0; + @Override public boolean hasNext() { return i < nvalues; } + @Override public byte[] next() { if (!hasNext()) @@ -253,6 +264,7 @@ } + @Override public void remove() { if (isReadOnly()) @@ -281,6 +293,7 @@ } + @Override public void set(final int index, final byte[] key) { assert rangeCheck(index); @@ -289,6 +302,7 @@ } + @Override public int add(final byte[] key) { assertNotFull(); @@ -299,6 +313,7 @@ } + @Override public int add(final byte[] key, final int off, final int len) { assertNotFull(); @@ -313,6 +328,7 @@ } + @Override public int add(final DataInput in, final int len) throws IOException { assertNotFull(); @@ -327,12 +343,14 @@ } + @Override final public int search(final byte[] searchKey) { throw new UnsupportedOperationException(); } + @Override public String toString() { return AbstractRaba.toString(this); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/AbstractCodedRaba.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/AbstractCodedRaba.java 2014-01-26 20:37:25 UTC (rev 7829) +++ 
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/AbstractCodedRaba.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -39,7 +39,6 @@ * mutation operations. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ abstract public class AbstractCodedRaba implements ICodedRaba { @@ -50,28 +49,34 @@ /** * Implementation is read-only. */ + @Override final public boolean isReadOnly() { return true; } + @Override final public int add(byte[] a) { throw new UnsupportedOperationException(); } + @Override final public int add(byte[] value, int off, int len) { throw new UnsupportedOperationException(); } + @Override final public int add(DataInput in, int len) throws IOException { throw new UnsupportedOperationException(); } + @Override final public void set(int index, byte[] a) { throw new UnsupportedOperationException(); } + @Override final public String toString() { return AbstractRaba.toString(this); @@ -82,18 +87,21 @@ * Basic implementation may be overridden if a faster implementation is * available. */ + @Override public Iterator<byte[]> iterator() { return new Iterator<byte[]>() { int i = 0; + @Override public boolean hasNext() { return i < size(); } + @Override public byte[] next() { if (!hasNext()) @@ -103,6 +111,7 @@ } + @Override public void remove() { throw new UnsupportedOperationException(); @@ -112,5 +121,5 @@ }; } - + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/CanonicalHuffmanRabaCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/CanonicalHuffmanRabaCoder.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/CanonicalHuffmanRabaCoder.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -202,7 +202,6 @@ * the codec and the decoder, which must process 8 bytes at a time. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class CanonicalHuffmanRabaCoder implements IRabaCoder, Externalizable { @@ -218,21 +217,25 @@ */ final protected static transient byte VERSION0 = 0x00; + @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { // No state. } + @Override public void writeExternal(ObjectOutput out) throws IOException { // No state. } + @Override final public boolean isKeyCoder() { return true; } + @Override final public boolean isValueCoder() { return true; @@ -666,6 +669,7 @@ /** * Return the #of distinct symbols used to generate the code. */ + @Override abstract public int getSymbolCount(); /** @@ -930,6 +934,7 @@ private final DecoderInputs decoderInputs; + @Override public DecoderInputs decoderInputs() { return decoderInputs; @@ -956,12 +961,14 @@ } + @Override final public int byte2symbol(final byte b) { return byte2symbol.get(b); } + @Override final public byte symbol2byte(final int symbol) { return (byte) symbol2byte[symbol]; @@ -1104,6 +1111,7 @@ } + @Override public AbstractFixedByteArrayBuffer encode(final IRaba raba, final DataOutputBuffer buf) { @@ -1111,6 +1119,7 @@ } + @Override public ICodedRaba encodeLive(final IRaba raba, final DataOutputBuffer buf) { final AbstractCodingSetup setup = new RabaCodingSetup(raba); @@ -1334,6 +1343,7 @@ } + @Override public ICodedRaba decode(final AbstractFixedByteArrayBuffer data) { return new CodedRabaImpl(data); @@ -1668,6 +1678,7 @@ } + @Override final public int size() { return size; @@ -1677,12 +1688,14 @@ /** * The capacity is equal to the size (the data are immutable). */ + @Override final public int capacity() { return size; } + @Override final public boolean isEmpty() { return size == 0; @@ -1693,18 +1706,21 @@ * Always returns <code>true</code> since {@link #size()} == * {@link #capacity()} by definition for this class. 
*/ + @Override final public boolean isFull() { return true; } + @Override final public boolean isKeys() { return isKeys; } + @Override public AbstractFixedByteArrayBuffer data() { return data; @@ -1750,6 +1766,7 @@ // // } + @Override public boolean isNull(final int index) { if (index < 0 || index >= size) @@ -1774,6 +1791,7 @@ * This computes the length of the decoded byte[] by counting the code * words for the coded value. */ + @Override public int length(final int index) { if (index < 0 || index >= size) @@ -1876,6 +1894,7 @@ * allocates the byte[]. The second pass decodes into the allocated * byte[]. */ + @Override public byte[] get(final int index) { if (index < 0 || index >= size) @@ -2048,6 +2067,7 @@ * This decodes the value at the specified index in a single pass onto * the caller's stream. */ + @Override public int copy(final int index, final OutputStream os) { if (index < 0 || index >= size) @@ -2213,6 +2233,7 @@ * Basic implementation may be overridden if a faster implementation is * available. */ + @Override public Iterator<byte[]> iterator() { /** @@ -2232,12 +2253,14 @@ int i = 0; + @Override public boolean hasNext() { return i < size(); } + @Override public byte[] next() { if (!hasNext()) @@ -2280,6 +2303,7 @@ } + @Override public void remove() { throw new UnsupportedOperationException(); @@ -2294,6 +2318,7 @@ * This is an efficient binary search performed without materializing the * coded byte[][]. 
*/ + @Override public int search(final byte[] probe) { if (probe == null) @@ -2520,4 +2545,11 @@ } + @Override + public boolean isDuplicateKeys() { + + return false; + + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/EmptyRabaValueCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/EmptyRabaValueCoder.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/EmptyRabaValueCoder.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -46,7 +46,6 @@ * B+Tree will be <strong>discarded</strong> by this {@link IRabaCoder}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class EmptyRabaValueCoder implements IRabaCoder, Externalizable { @@ -55,6 +54,7 @@ */ private static final long serialVersionUID = -8011456562258609162L; + @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { @@ -62,6 +62,7 @@ } + @Override public void writeExternal(ObjectOutput out) throws IOException { // NOP @@ -77,6 +78,7 @@ /** * No. Keys can not be constrained to be empty. */ + @Override final public boolean isKeyCoder() { return false; @@ -86,12 +88,21 @@ /** * Yes. */ + @Override final public boolean isValueCoder() { return true; } + @Override + public boolean isDuplicateKeys() { + + return false; + + } + + @Override public ICodedRaba encodeLive(final IRaba raba, final DataOutputBuffer buf) { if (raba == null) @@ -119,6 +130,7 @@ * <strong>Any data in the {@link IRaba} will be discarded!</strong> Only * the {@link IRaba#size()} is maintained. 
*/ + @Override public AbstractFixedByteArrayBuffer encode(final IRaba raba, final DataOutputBuffer buf) { @@ -140,6 +152,7 @@ } + @Override public ICodedRaba decode(final AbstractFixedByteArrayBuffer data) { return new EmptyCodedRaba(data); @@ -182,6 +195,7 @@ } + @Override final public AbstractFixedByteArrayBuffer data() { return data; @@ -191,43 +205,50 @@ /** * Yes. */ + @Override final public boolean isReadOnly() { return true; } + @Override public boolean isKeys() { return false; } + @Override final public int capacity() { return size; } + @Override final public int size() { return size; } + @Override final public boolean isEmpty() { return size == 0; } + @Override final public boolean isFull() { return true; } - final public boolean isNull(int index) { + @Override + final public boolean isNull(final int index) { if (index < 0 || index >= size) throw new IndexOutOfBoundsException(); @@ -236,7 +257,8 @@ } - final public int length(int index) { + @Override + final public int length(final int index) { if (index < 0 || index >= size) throw new IndexOutOfBoundsException(); @@ -245,7 +267,8 @@ } - final public byte[] get(int index) { + @Override + final public byte[] get(final int index) { if (index < 0 || index >= size) throw new IndexOutOfBoundsException(); @@ -254,7 +277,8 @@ } - final public int copy(int index, OutputStream os) { + @Override + final public int copy(final int index, final OutputStream os) { if (index < 0 || index >= size) throw new IndexOutOfBoundsException(); @@ -263,18 +287,21 @@ } + @Override final public Iterator<byte[]> iterator() { return new Iterator<byte[]>() { int i = 0; + @Override public boolean hasNext() { return i < size; } + @Override public byte[] next() { i++; @@ -283,6 +310,7 @@ } + @Override public void remove() { throw new UnsupportedOperationException(); @@ -299,6 +327,7 @@ * @throws UnsupportedOperationException * unless the {@link IRaba} represents B+Tree keys. 
*/ + @Override final public int search(final byte[] searchKey) { if (isKeys()) @@ -312,22 +341,27 @@ * Mutation API is not supported. */ + @Override final public int add(byte[] a) { throw new UnsupportedOperationException(); } + @Override final public int add(byte[] value, int off, int len) { throw new UnsupportedOperationException(); } + @Override final public int add(DataInput in, int len) throws IOException { throw new UnsupportedOperationException(); } + @Override final public void set(int index, byte[] a) { throw new UnsupportedOperationException(); } + @Override final public String toString() { return AbstractRaba.toString(this); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FixedLengthValueRabaCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FixedLengthValueRabaCoder.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FixedLengthValueRabaCoder.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -73,7 +73,6 @@ * </dl> * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class FixedLengthValueRabaCoder implements IRabaCoder, Externalizable { @@ -87,6 +86,7 @@ /** * No. */ + @Override final public boolean isKeyCoder() { return false; @@ -96,12 +96,20 @@ /** * Yes. */ + @Override final public boolean isValueCoder() { return true; } + @Override + public boolean isDuplicateKeys() { + + return false; + + } + /** * The required length for all non-<code>null</code> values. 
*/ @@ -132,12 +140,14 @@ } + @Override public void writeExternal(final ObjectOutput out) throws IOException { out.writeInt(len); } + @Override public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { @@ -168,6 +178,7 @@ * if the {@link IRaba} has a non-<code>null</code> value with a * length other than the length specified to the constructor. */ + @Override public ICodedRaba encodeLive(final IRaba raba, final DataOutputBuffer buf) { if (raba == null) @@ -262,6 +273,7 @@ } + @Override public AbstractFixedByteArrayBuffer encode(final IRaba raba, final DataOutputBuffer buf) { @@ -269,6 +281,7 @@ } + @Override public ICodedRaba decode(final AbstractFixedByteArrayBuffer data) { return new CodedRabaImpl(len, data); @@ -355,36 +368,42 @@ */ private final int O_values; + @Override final public AbstractFixedByteArrayBuffer data() { return data; } + @Override public boolean isKeys() { return false; } + @Override final public int capacity() { return size; } + @Override final public int size() { return size; } + @Override final public boolean isEmpty() { return size == 0; } + @Override final public boolean isFull() { return true; @@ -398,6 +417,7 @@ } + @Override public boolean isNull(final int index) { rangeCheck(index); @@ -406,6 +426,7 @@ } + @Override public int length(final int index) { if (isNull(index)) @@ -415,6 +436,7 @@ } + @Override public byte[] get(final int index) { if (isNull(index)) @@ -431,6 +453,7 @@ } + @Override public int copy(final int index, final OutputStream os) { if (isNull(index)) @@ -456,6 +479,7 @@ * Search */ + @Override public int search(final byte[] key) { throw new UnsupportedOperationException(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoder.java 2014-01-26 20:37:25 UTC (rev 
7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoder.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -45,7 +45,6 @@ * The data MUST be ordered. <code>null</code> values are not allowed. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class FrontCodedRabaCoder implements IRabaCoder, Externalizable { @@ -59,6 +58,7 @@ private int ratio; + @Override public String toString() { return super.toString() + "{ratio=" + ratio + "}"; @@ -72,7 +72,6 @@ * * @author <a href="mailto:tho...@us...">Bryan * Thompson</a> - * @version $Id$ */ public static class DefaultFrontCodedRabaCoder extends FrontCodedRabaCoder { @@ -91,11 +90,13 @@ } + @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { // NOP } + @Override public void writeExternal(ObjectOutput out) throws IOException { // NOP } @@ -142,18 +143,28 @@ } + @Override final public boolean isKeyCoder() { return true; } + @Override final public boolean isValueCoder() { return false; } + @Override + public boolean isDuplicateKeys() { + + return false; + + } + + @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { @@ -161,6 +172,7 @@ } + @Override public void writeExternal(ObjectOutput out) throws IOException { out.writeInt(ratio); @@ -190,6 +202,7 @@ /** The byte offset of the start of the front-coded representation. */ private static final int O_DATA = O_RATIO + SIZEOF_RATIO; + @Override public ICodedRaba encodeLive(final IRaba raba, final DataOutputBuffer buf) { if (raba == null) @@ -212,7 +225,7 @@ // front-code the byte[][]. 
final CustomByteArrayFrontCodedList decoder = new CustomByteArrayFrontCodedList( - raba.iterator(), ratio); + raba.iterator(), ratio, isDuplicateKeys()); try { @@ -242,6 +255,7 @@ } + @Override public AbstractFixedByteArrayBuffer encode(final IRaba raba, final DataOutputBuffer buf) { @@ -255,9 +269,10 @@ } + @Override public ICodedRaba decode(final AbstractFixedByteArrayBuffer data) { - return new CodedRabaImpl(data); + return new CodedRabaImpl(data, isDuplicateKeys()); } @@ -278,8 +293,12 @@ * * @param data * The record containing the coded data. + * @param hasDups + * <code>true</code> iff the {@link IRabaCoder} supports + * duplicate keys. */ - public CodedRabaImpl(final AbstractFixedByteArrayBuffer data) { + public CodedRabaImpl(final AbstractFixedByteArrayBuffer data, + final boolean hasDups) { final byte version = data.getByte(O_VERSION); @@ -297,7 +316,7 @@ // wrap slice with decoder. this.decoder = new CustomByteArrayFrontCodedList(size, ratio, data - .array(), data.off() + O_DATA, data.len()); + .array(), data.off() + O_DATA, data.len(), hasDups); this.data = data; @@ -321,6 +340,7 @@ } + @Override public AbstractFixedByteArrayBuffer data() { return data; @@ -330,24 +350,28 @@ /** * Represents B+Tree keys. */ + @Override final public boolean isKeys() { return true; } + @Override final public int size() { return decoder.size(); } + @Override final public int capacity() { return decoder.size(); } + @Override final public boolean isEmpty() { return size() == 0; @@ -358,6 +382,7 @@ * Always returns <code>true</code> since the front-coded representation * is dense. */ + @Override final public boolean isFull() { return true; @@ -368,24 +393,28 @@ * Always returns <code>false</code> (<code>null</code>s are not * allowed). 
*/ + @Override final public boolean isNull(final int index) { return false; } + @Override final public byte[] get(final int index) { return decoder.get(index); } + @Override final public int length(final int index) { return decoder.arrayLength(index); } + @Override public int copy(final int index, final OutputStream os) { try { @@ -400,12 +429,14 @@ } + @Override public Iterator<byte[]> iterator() { return decoder.iterator(); } + @Override public int search(final byte[] searchKey) { // optimization: always keys. Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoderDupKeys.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoderDupKeys.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/FrontCodedRabaCoderDupKeys.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -0,0 +1,59 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.btree.raba.codec; + +/** + * Variant of the {@link FrontCodedRabaCoder} that supports duplicate keys. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class FrontCodedRabaCoderDupKeys extends FrontCodedRabaCoder { + + /** + * A default instance. + */ + public static final transient FrontCodedRabaCoderDupKeys INSTANCE = new FrontCodedRabaCoderDupKeys(); + + protected transient static final int DEFAULT_RATIO = 8; + + public FrontCodedRabaCoderDupKeys(final int ratio) { + + super(ratio); + + } + + public FrontCodedRabaCoderDupKeys() { + + super(DEFAULT_RATIO); + + } + + @Override + public boolean isDuplicateKeys() { + + return true; + + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/IRabaCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/IRabaCoder.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/IRabaCoder.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -36,7 +36,6 @@ * @see IRaba * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public interface IRabaCoder extends Serializable { @@ -55,6 +54,14 @@ boolean isValueCoder(); /** + * Return true iff this {@link IRabaCoder} supports duplicate keys. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/763" > + * Stochastic Results With Analytic Query Mode </a> + */ + boolean isDuplicateKeys(); + + /** * Encode the data, returning an {@link ICodedRaba}. Implementations of this * method should be optimized for the very common use case where the caller * requires immediate access to the coded data record. 
In that case, many of Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/SimpleRabaCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/SimpleRabaCoder.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/raba/codec/SimpleRabaCoder.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -47,7 +47,6 @@ * and B+Tree values. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class SimpleRabaCoder implements IRabaCoder, Externalizable { @@ -74,6 +73,7 @@ /** * Yes. */ + @Override final public boolean isKeyCoder() { return true; @@ -83,25 +83,35 @@ /** * Yes. */ + @Override final public boolean isValueCoder() { return true; } - /** + @Override + public boolean isDuplicateKeys() { + + return false; + + } + + /** * De-serialization ctor. Use {@link #INSTANCE} otherwise. */ public SimpleRabaCoder() { } + @Override public void writeExternal(ObjectOutput out) throws IOException { // NOP } + @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { @@ -136,6 +146,7 @@ return O_SIZE + SIZEOF_SIZE + SIZEOF_CAPACITY; } + @Override public ICodedRaba encodeLive(final IRaba raba, final DataOutputBuffer buf) { if (raba == null) @@ -244,6 +255,7 @@ } + @Override public AbstractFixedByteArrayBuffer encode(final IRaba raba, final DataOutputBuffer buf) { @@ -256,6 +268,7 @@ } + @Override public ICodedRaba decode(final AbstractFixedByteArrayBuffer data) { return new CodedRabaImpl(data); @@ -366,36 +379,42 @@ */ private final int O_offsets; + @Override final public AbstractFixedByteArrayBuffer data() { return data; } + @Override public boolean isKeys() { return isKeys; } + @Override final public int capacity() { return capacity; } + @Override final public int size() { return size; } + @Override final public boolean isEmpty() { 
return size == 0; } + @Override final public boolean isFull() { return true; @@ -409,6 +428,7 @@ } + @Override public boolean isNull(final int index) { if (index >= size && index < capacity) { @@ -425,6 +445,7 @@ } + @Override public int length(final int index) { if (isNull(index)) @@ -443,6 +464,7 @@ } + @Override public byte[] get(final int index) { if (isNull(index)) @@ -477,6 +499,7 @@ } + @Override public int copy(final int index, final OutputStream os) { if (isNull(index)) @@ -509,6 +532,7 @@ * Search */ + @Override public int search(final byte[] key) { if (!isKeys()) Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/DirectoryPage.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/DirectoryPage.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/DirectoryPage.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -38,7 +38,6 @@ import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleIterator; import com.bigdata.btree.Node; -import com.bigdata.btree.PageStats; import com.bigdata.htree.AbstractHTree.ChildMemoizer; import com.bigdata.htree.AbstractHTree.LoadChildRequest; import com.bigdata.htree.data.IDirectoryData; @@ -2058,6 +2057,23 @@ } /** + * If this is an overflow directory then the depth-based hashCode is irrelevant + * since it is used as a blob container for BucketPage references. + */ + public int getLocalHashCode(final byte[] key, final int prefixLength) { + if (isOverflowDirectory()) { + /* + * Shouldn't need to check the key, this will be handled when + * the BucketPage is checked for a precise match + */ + return 0; + } + + return super.getLocalHashCode(key, prefixLength); + + } + + /** * This method is never called at present since DirectoryPages are * always created at maximum depth. Whether there is any advantage * in supporting pages of lesser depths is yet to be determined. 
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/NodeSerializer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/NodeSerializer.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/NodeSerializer.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -253,6 +253,19 @@ */ this.nodeCoder = new DefaultDirectoryPageCoder(); + if (!indexMetadata.getTupleSerializer().getLeafKeysCoder().isDuplicateKeys()) { + + /* + * This constraint *could* be relaxed, but the HTree API presumes + * that we can have duplicate keys and this check verifies tha the + * keys coder supports duplicate keys. + */ + + throw new IllegalArgumentException( + "The leaf keys coder for HTree should allow duplicate keys."); + + } + /* * Note: We are using the same leaf coder class as the BTree. */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba/MutableKeyBuffer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba/MutableKeyBuffer.java 2014-01-26 20:37:25 UTC (rev 7829) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba/MutableKeyBuffer.java 2014-01-27 16:09:26 UTC (rev 7830) @@ -187,6 +187,7 @@ } + @Override public String toString() { return AbstractRaba.toString(this); @@ -196,12 +197,14 @@ /** * Returns a reference to the key at that index. */ + @Override final public byte[] get(final int index) { return keys[index]; } + @Override final public int length(final int index) { final byte[] tmp = keys[index]; @@ -213,6 +216,7 @@ } + @Override final public int copy(final int index, final OutputStream out) { final byte[] tmp = keys[index]; @@ -236,12 +240,14 @@ * * @return <code>true</code> iff the key at that index is <code>null</code>. 
*/ + @Override final public boolean isNull(final int index) { return keys[index] == null; } + @Override final public boolean isEmpty() { return nkeys == 0; @@ -256,18 +262,21 @@ * MUST explicitly scan a buddy bucket to determine the #of keys in a buddy * bucket on the page. */ + @Override final public int size() { return nkeys; } + @Override final public int capacity() { return keys.length; } + @Override final public boolean isFull() { return nkeys == keys.length; @@ -277,6 +286,7 @@ /** * Mutable. */ + @Override final public boolean isReadOnly() { return false; @@ -284,11 +294,11 @@ } /** - * Instances are NOT searchable. Duplicates and <code>null</code>s ARE - * permitted. + * Instances are searchable and support duplicate keys. * - * @returns <code>false</code> + * @returns <code>true</code> */ + @Override final public boolean isKeys() { return true; @@ -301,20 +311,24 @@ * This iterator visits all keys on the bucket page, including * <code>null</code>s. */ + @Override public Iterator<byte[]> iterator() { return new Iterator<byte[]>() { int i = 0; + @Override public boolean hasNext() { return i < size(); } + @Override public byte[] next() { return get(i++); } + @Override public void remove() { throw new UnsupportedOperationException(); } @@ -32... [truncated message content] |
From: <mrp...@us...> - 2014-01-26 20:37:39
|
Revision: 7829 http://bigdata.svn.sourceforge.net/bigdata/?rev=7829&view=rev Author: mrpersonick Date: 2014-01-26 20:37:25 +0000 (Sun, 26 Jan 2014) Log Message: ----------- major commit of RDR. one step away from entirely removing the overloading of the context position for sids. working checkpoint. Modified Paths: -------------- branches/RDR/.classpath branches/RDR/bigdata/src/resources/logging/log4j.properties branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallGraph.ttl branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BNodeContextFactory.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNode.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNodeImpl.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactory.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactoryImpl.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueImpl.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/ntriples/BigdataNTriplesParser.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/BigdataRDFXMLParser.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleParser.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/history/HistoryIndexTupleSerializer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOTupleSerializer.java 
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestBFS.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestEncodeDecodeKeys.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestEncodeDecodeMixedIVs.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestNTriplesWithSids.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestRDFXMLInterchangeWithStatementIdentifiers.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestStatementBuffer.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTNamedSubqueryOptimizer.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTUnionFiltersOptimizer.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPO.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOTupleSerializer.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOValueCoders.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/store/TestStatementIdentifiers.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java Added Paths: ----------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java Modified: branches/RDR/.classpath =================================================================== --- branches/RDR/.classpath 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/.classpath 2014-01-26 20:37:25 UTC (rev 7829) @@ -33,7 +33,7 @@ <classpathentry kind="src" path="bigdata-gas/src/test"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/dsi-utils-1.0.6-020610.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lgpl-utils-1.0.6-020610.jar"/> - <classpathentry kind="lib" path="bigdata-jini/lib/apache/zookeeper-3.3.3.jar"/> + <classpathentry 
exported="true" kind="lib" path="bigdata-jini/lib/apache/zookeeper-3.3.3.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-continuation-7.2.2.v20101205.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-http-7.2.2.v20101205.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-io-7.2.2.v20101205.jar"/> @@ -45,8 +45,8 @@ <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-webapp-7.2.2.v20101205.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-xml-7.2.2.v20101205.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/unimi/colt-1.2.0.jar"/> - <classpathentry kind="lib" path="bigdata/lib/icu/icu4j-4.8.jar"/> - <classpathentry kind="lib" path="bigdata/lib/icu/icu4j-charset-4.8.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/icu/icu4j-4.8.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/icu/icu4j-charset-4.8.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/junit-3.8.1.jar" sourcepath="/root/.m2/repository/junit/junit/3.8.1/junit-3.8.1-sources.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/browser.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/classserver.jar"/> @@ -67,24 +67,24 @@ <classpathentry exported="true" kind="lib" path="bigdata/lib/unimi/fastutil-5.1.5.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-analyzers-3.0.0.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-core-3.0.0.jar"/> - <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/> + <classpathentry exported="true" kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/high-scale-lib-v1.1.2.jar"/> <classpathentry exported="true" kind="lib" 
path="bigdata/lib/junit-ext-1.1-b3-dev.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/slf4j-api-1.6.1.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/slf4j-log4j12-1.6.1.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/commons-codec-1.4.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/commons-logging-1.1.1.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/httpclient-4.1.3.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/httpclient-cache-4.1.3.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/httpcore-4.1.4.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/httpmime-4.1.3.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/commons-io-2.1.jar"/> - <classpathentry kind="lib" path="bigdata/lib/apache/log4j-1.2.17.jar"/> - <classpathentry kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar"/> - <classpathentry kind="lib" path="bigdata-rdf/lib/sesame-rio-testsuite-2.6.10.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.6.10.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.6.10.jar"/> - <classpathentry kind="lib" path="bigdata-rdf/lib/nxparser-1.2.3.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-codec-1.4.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-logging-1.1.1.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/httpclient-4.1.3.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/httpclient-cache-4.1.3.jar"/> + <classpathentry exported="true" kind="lib" 
path="bigdata-sails/lib/httpcomponents/httpcore-4.1.4.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/httpmime-4.1.3.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-io-2.1.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/apache/log4j-1.2.17.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/sesame-rio-testsuite-2.6.10.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.6.10.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.6.10.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/nxparser-1.2.3.jar"/> <classpathentry kind="output" path="bin"/> </classpath> Modified: branches/RDR/bigdata/src/resources/logging/log4j.properties =================================================================== --- branches/RDR/bigdata/src/resources/logging/log4j.properties 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata/src/resources/logging/log4j.properties 2014-01-26 20:37:25 UTC (rev 7829) @@ -16,9 +16,13 @@ log4j.logger.com.bigdata.rdf.store.DataLoader=INFO log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO +#log4j.logger.com.bigdata.rdf.rio.StatementBuffer=ALL +#log4j.logger.com.bigdata.rdf.sail.TestProvenanceQuery=ALL + # Test suite loggers. 
#log4j.logger.junit=INFO #log4j.logger.com.bigdata.btree.AbstractBTreeTestCase=INFO +log4j.logger.junit.framework.TestCase2=ERROR # dest1 log4j.appender.dest1=org.apache.log4j.ConsoleAppender Modified: branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallGraph.ttl =================================================================== --- branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallGraph.ttl 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallGraph.ttl 2014-01-26 20:37:25 UTC (rev 7829) @@ -1,19 +1,19 @@ -@prefix : <http://www.bigdata.com/> . +@prefix bd: <http://www.bigdata.com/> . @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . @prefix foaf: <http://xmlns.com/foaf/0.1/> . #: { - :Mike rdf:type foaf:Person . - :Bryan rdf:type foaf:Person . - :Martyn rdf:type foaf:Person . + bd:Mike rdf:type foaf:Person . + bd:Bryan rdf:type foaf:Person . + bd:Martyn rdf:type foaf:Person . - :Mike rdfs:label "Mike" . - :Bryan rdfs:label "Bryan" . - :DC rdfs:label "DC" . + bd:Mike rdfs:label "Mike" . + bd:Bryan rdfs:label "Bryan" . + bd:DC rdfs:label "DC" . - :Mike foaf:knows :Bryan . - :Bryan foaf:knows :Mike . - :Bryan foaf:knows :Martyn . - :Martyn foaf:knows :Bryan . + bd:Mike foaf:knows bd:Bryan . + bd:Bryan foaf:knows bd:Mike . + bd:Bryan foaf:knows bd:Martyn . + bd:Martyn foaf:knows bd:Bryan . 
#} Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -35,6 +35,8 @@ import java.io.IOException; import java.math.BigDecimal; import java.math.BigInteger; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.util.LinkedList; import java.util.List; import java.util.UUID; @@ -66,6 +68,7 @@ import com.bigdata.rdf.internal.impl.literal.XSDUnsignedLongIV; import com.bigdata.rdf.internal.impl.literal.XSDUnsignedShortIV; import com.bigdata.rdf.internal.impl.uri.FullyInlineURIIV; +import com.bigdata.rdf.internal.impl.uri.IPAddrIV; import com.bigdata.rdf.internal.impl.uri.PartlyInlineURIIV; import com.bigdata.rdf.internal.impl.uri.URIExtensionIV; import com.bigdata.rdf.internal.impl.uri.VocabURIByteIV; @@ -437,7 +440,7 @@ final ISPO spo = SPOKeyOrder.SPO.decodeKey(key, o); // all spos that have a sid are explicit spo.setStatementType(StatementEnum.Explicit); - spo.setStatementIdentifier(true); +// spo.setStatementIdentifier(true); // create a sid iv and return it return new SidIV(spo); } @@ -535,6 +538,21 @@ // The data type final DTE dte = AbstractIV.getDTE(flags); switch (dte) { + case XSDBoolean: { + /* + * TODO Using XSDBoolean so that we can know how to decode this thing + * as an IPAddrIV. We need to fix the Extension mechanism for URIs. + * Extension is already used above. 
+ */ + try { + final byte[] addr = new byte[4]; + System.arraycopy(key, o, addr, 0, 4); + final InetAddress ip = InetAddress.getByAddress(addr); + return new IPAddrIV(ip); + } catch (UnknownHostException ex) { + throw new RuntimeException(ex); + } + } case XSDByte: { final byte x = key[o];//KeyBuilder.decodeByte(key[o]); return new VocabURIByteIV<BigdataURI>(x); Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -28,6 +28,7 @@ package com.bigdata.rdf.internal; import java.math.BigInteger; +import java.net.UnknownHostException; import java.util.LinkedHashMap; import java.util.Map; import java.util.TimeZone; @@ -58,6 +59,7 @@ import com.bigdata.rdf.internal.impl.literal.XSDUnsignedLongIV; import com.bigdata.rdf.internal.impl.literal.XSDUnsignedShortIV; import com.bigdata.rdf.internal.impl.uri.FullyInlineURIIV; +import com.bigdata.rdf.internal.impl.uri.IPAddrIV; import com.bigdata.rdf.internal.impl.uri.URIExtensionIV; import com.bigdata.rdf.lexicon.LexiconKeyOrder; import com.bigdata.rdf.model.BigdataBNode; @@ -435,6 +437,22 @@ */ private IV<BigdataURI, ?> createInlineURIIV(final URI value) { + try { + + final String s = value.stringValue(); + + if (s.startsWith(IPAddrIV.NAMESPACE)) { + + return new IPAddrIV(s.substring(IPAddrIV.NAMESPACE_LEN)); + + } + + } catch (UnknownHostException ex) { + + log.warn("unknown host exception, will not inline: " + value); + + } + if (maxInlineTextLength == 0) { return null; Added: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java =================================================================== --- 
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java (rev 0) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -0,0 +1,349 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.internal.impl.uri; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.io.ObjectStreamException; +import java.io.Serializable; +import java.net.InetAddress; +import java.net.UnknownHostException; + +import org.openrdf.model.URI; +import org.openrdf.model.Value; + +import com.bigdata.btree.BytesUtil.UnsignedByteArrayComparator; +import com.bigdata.btree.keys.IKeyBuilder; +import com.bigdata.io.LongPacker; +import com.bigdata.rdf.internal.DTE; +import com.bigdata.rdf.internal.ILexiconConfiguration; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.internal.IVUtility; +import com.bigdata.rdf.internal.VTE; +import com.bigdata.rdf.internal.impl.AbstractInlineIV; +import com.bigdata.rdf.lexicon.LexiconRelation; +import com.bigdata.rdf.model.BigdataBNode; +import com.bigdata.rdf.model.BigdataURI; +import com.bigdata.rdf.model.BigdataValueFactory; 
+import com.bigdata.rdf.spo.SPOKeyOrder; + +/** + * Internal value representing an inline IP address. Uses the InetAddress + * class to represent the IP address and perform the translation to and from + * byte[], which is then used directly in the IV key (after the flags). + * <p> + * This internal value has a {@link VTE} of {@link VTE#URI}. + * <p> + * {@inheritDoc} + */ +public class IPAddrIV<V extends BigdataURI> extends AbstractInlineIV<V, InetAddress> + implements Serializable, URI { + + /** + * + */ + private static final long serialVersionUID = 685148537376856907L; + +// private static final transient Logger log = Logger.getLogger(SidIV.class); + + public static final String NAMESPACE = "ip:/"; + + public static final int NAMESPACE_LEN = NAMESPACE.length(); + + /** + * The inline IP address. + */ + private final InetAddress value; + + /** + * The cached string representation of this IP. + */ + private transient String hostAddress; + + /** + * The cached byte[] key for the encoding of this IV. + */ + private transient byte[] key; + + /** + * The cached materialized BigdataValue for this InetAddress. + */ + private transient V uri; + + public IV<V, InetAddress> clone(final boolean clearCache) { + + final IPAddrIV<V> tmp = new IPAddrIV<V>(value); + + // Propagate the cached byte[] key. + tmp.key = key; + + // Propagate the cached BigdataValue. + tmp.uri = uri; + + if (!clearCache) { + + tmp.setValue(getValueCache()); + + } + + return tmp; + + } + + /** + * Ctor with internal value specified. + */ + public IPAddrIV(final InetAddress value) { + + /* + * TODO Using XSDBoolean so that we can know how to decode this thing + * as an IPAddrIV. We need to fix the Extension mechanism for URIs. + */ + super(VTE.URI, DTE.XSDBoolean); + + this.value = value; + + } + + /** + * Ctor with host address specified. 
+ */ + public IPAddrIV(final String hostAddress) throws UnknownHostException { + + /* + * Note: XSDBoolean happens to be assigned the code value of 0, which is + * the value we we want when the data type enumeration will be ignored. + */ + super(VTE.URI, DTE.XSDBoolean); + + this.value = InetAddress.getByName(hostAddress); + this.hostAddress = hostAddress; + + } + + /** + * Returns the inline value. + */ + public InetAddress getInlineValue() throws UnsupportedOperationException { + return value; + } + + /** + * Returns the URI representation of this IV. + */ + public V asValue(final LexiconRelation lex) { + if (uri == null) { + uri = (V) lex.getValueFactory().createURI(getNamespace(), getLocalName()); + uri.setIV(this); + } + return uri; + } + + /** + * Return the byte length for the byte[] encoded representation of this + * internal value. Depends on the byte length of the encoded inline value. + */ + public int byteLength() { + return 1 + key().length; + } + + public String toString() { + return "IP("+getLocalName()+")"; + } + + public int hashCode() { + return value.hashCode(); + } + +// /** +// * Implements {@link BNode#getID()}. +// * <p> +// * This implementation uses the {@link BigInteger} class to create a unique +// * blank node ID based on the <code>unsigned byte[]</code> key of the inline +// * {@link SPO}. +// */ +// @Override +// public String getID() { +//// // just use the hash code. can result in collisions +//// return String.valueOf(hashCode()); +// +// // create a big integer using the spo key. should result in unique ids +// final byte[] key = key(); +// final int signum = key.length > 0 ? 
1 : 0; +// final BigInteger bi = new BigInteger(signum, key); +// return 's' + bi.toString(); +// } + + @Override + public String getNamespace() { + return NAMESPACE; + } + + @Override + public String getLocalName() { + if (hostAddress == null) { + hostAddress = value.getHostAddress(); + } + return hostAddress; + } + + /** + * Two {@link IPAddrIV} are equal if their InetAddresses are equal. + */ + public boolean equals(final Object o) { + if (this == o) + return true; + if (o instanceof IPAddrIV) { + final InetAddress value2 = ((IPAddrIV<?>) o).value; + return value.equals(value2); + } + return false; + } + + public int _compareTo(IV o) { + + /* + * Note: This works, but it might be more expensive. + */ + return UnsignedByteArrayComparator.INSTANCE.compare(key(), ((IPAddrIV)o).key()); + + } + + /** + * Encode this internal value into the supplied key builder. Emits the + * flags, following by the encoded byte[] representing the spo, in SPO + * key order. + * <p> + * {@inheritDoc} + */ + @Override + public IKeyBuilder encode(final IKeyBuilder keyBuilder) { + + // First emit the flags byte. + keyBuilder.appendSigned(flags()); + + // Then append the InetAddress byte[]. + keyBuilder.append(key()); + + return keyBuilder; + + } + + private byte[] key() { + + if (key == null) { + + key = value.getAddress(); + + } + + return key; + + } + + /** + * Object provides serialization for {@link IPAddrIV} via the write-replace + * and read-replace pattern. + */ + private static class IPAddrIVState implements Externalizable { + + private static final long serialVersionUID = -1L; + +// private byte flags; + private byte[] key; + + /** + * De-serialization constructor. 
+ */ + public IPAddrIVState() { + + } + + private IPAddrIVState(final IPAddrIV iv) { +// this.flags = flags; + this.key = iv.key(); + } + + public void readExternal(ObjectInput in) throws IOException, + ClassNotFoundException { +// flags = in.readByte(); + final int nbytes = LongPacker.unpackInt(in); + key = new byte[nbytes]; + in.readFully(key); + } + + public void writeExternal(ObjectOutput out) throws IOException { +// out.writeByte(flags); + LongPacker.packLong(out, key.length); + out.write(key); + } + + private Object readResolve() throws ObjectStreamException { + + try { + + final InetAddress value = InetAddress.getByAddress(key); + + return new IPAddrIV(value); + + } catch (UnknownHostException ex) { + + throw new RuntimeException(ex); + + } + + } + + } + + private Object writeReplace() throws ObjectStreamException { + + return new IPAddrIVState(this); + + } + + /** + * Implements {@link Value#stringValue()}. + */ + @Override + public String stringValue() { + + return getLocalName(); + + } + + /** + * Does not need materialization to answer URI interface methods. 
+ */ + @Override + public boolean needsMaterialization() { + + return false; + + } + + +} \ No newline at end of file Property changes on: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java ___________________________________________________________________ Added: svn:mime-type + text/plain Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BNodeContextFactory.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BNodeContextFactory.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BNodeContextFactory.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -89,6 +89,10 @@ return valueFactory.createBNode(id); } + public BigdataBNode createBNode(BigdataStatement stmt) { + return valueFactory.createBNode(stmt); + } + public BigdataLiteral createLiteral(boolean arg0) { return valueFactory.createLiteral(arg0); } Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNode.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNode.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNode.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -67,4 +67,14 @@ */ public boolean isStatementIdentifier(); + /** + * Set the statement that this blank node models. + */ + public void setStatement(BigdataStatement stmt); + + /** + * Get the statement that this blank node models. 
+ */ + public BigdataStatement getStatement(); + } Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNodeImpl.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNodeImpl.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNodeImpl.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -49,7 +49,10 @@ import org.openrdf.model.BNode; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.internal.impl.bnode.SidIV; import com.bigdata.rdf.rio.StatementBuffer; +import com.bigdata.rdf.rio.UnificationException; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.store.AbstractTripleStore; @@ -76,7 +79,7 @@ private static final long serialVersionUID = 2675602437833048872L; private final String id; - + /** * Boolean flag is set during conversion from an RDF interchange syntax * into the internal {@link SPO} model if the blank node is a statement @@ -95,14 +98,45 @@ */ BigdataBNodeImpl(final BigdataValueFactory valueFactory, final String id) { + this(valueFactory, id, null); + + } + + BigdataBNodeImpl(final BigdataValueFactory valueFactory, final String id, + final BigdataStatement stmt) { + super(valueFactory, null); if (id == null) throw new IllegalArgumentException(); this.id = id; + + this.sid = stmt; + if (stmt != null) { + this.statementIdentifier = true; + } } + + @Override + public IV getIV() { + + if (super.iv == null && sid != null) { + + if (sid.getSubject() == this || sid.getObject() == this) + throw new UnificationException("illegal self-referential sid"); + + final IV s = sid.s(); + final IV p = sid.p(); + final IV o = sid.o(); + if (s != null && p != null && o != null) { + setIV(new SidIV(new SPO(s, p, o))); + } + } + + return super.iv; + } public String toString() { @@ -179,7 +213,7 @@ /** * Marks this as a blank node which models the specified statement. 
* - * @param stmt + * @param sid * The statement. */ final public void setStatement(final BigdataStatement sid) { Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -63,6 +63,7 @@ protected final BigdataURI p; protected final BigdataValue o; protected final BigdataResource c; + protected IV sid = null; private StatementEnum type; private boolean userFlag; private transient boolean override = false; @@ -282,39 +283,51 @@ } - public final void setStatementIdentifier(final boolean sidable) { +// public final void setStatementIdentifier(final boolean sidable) { +// +// if (sidable && type != StatementEnum.Explicit) { +// +// // Only allowed for explicit statements. +// throw new IllegalStateException(); +// +// } +// +//// if (c == null) { +//// +//// // this SHOULD not ever happen +//// throw new IllegalStateException(); +//// +//// } +//// +//// c.setIV(new SidIV(this)); +// +// this.sid = new SidIV(this); +// +// } - if (sidable && type != StatementEnum.Explicit) { - - // Only allowed for explicit statements. 
- throw new IllegalStateException(); - - } - - if (c == null) { - - // this SHOULD not ever happen - throw new IllegalStateException(); - - } - - c.setIV(new SidIV(this)); - - } - public final IV getStatementIdentifier() { - if (!hasStatementIdentifier()) - throw new IllegalStateException("No statement identifier: " - + toString()); +// if (!hasStatementIdentifier()) +// throw new IllegalStateException("No statement identifier: " +// + toString()); +// +// return c.getIV(); - return c.getIV(); + if (sid == null && type == StatementEnum.Explicit) { + + sid = new SidIV(this); + + } + + return sid; } final public boolean hasStatementIdentifier() { - return c != null && c.getIV().isStatement(); +// return c != null && c.getIV().isStatement(); + + return type == StatementEnum.Explicit; } Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactory.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactory.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactory.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -85,6 +85,8 @@ BigdataBNode createBNode(); BigdataBNode createBNode(String id); + + BigdataBNode createBNode(BigdataStatement stmt); BigdataLiteral createLiteral(String label); Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactoryImpl.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactoryImpl.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactoryImpl.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -36,6 +36,7 @@ import org.openrdf.model.BNode; import org.openrdf.model.Literal; import org.openrdf.model.Resource; +import org.openrdf.model.Statement; import org.openrdf.model.URI; import 
org.openrdf.model.Value; import org.openrdf.model.datatypes.XMLDatatypeUtil; @@ -235,6 +236,12 @@ } + public BigdataBNodeImpl createBNode(final BigdataStatement stmt) { + + return new BigdataBNodeImpl(this, nextID(), stmt); + + } + public BigdataLiteralImpl createLiteral(final String label) { return new BigdataLiteralImpl(this, label, null, null); Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueImpl.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueImpl.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueImpl.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -48,7 +48,7 @@ private volatile transient BigdataValueFactory valueFactory; - private volatile IV iv; + protected volatile IV iv; public final BigdataValueFactory getValueFactory() { @@ -115,7 +115,7 @@ } - final public IV getIV() { + public IV getIV() { return iv; Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -153,6 +153,8 @@ final RDFParser parser = Rio.createParser(rdfFormat, valueFactory); + parser.setValueFactory(valueFactory); + return parser; } Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -232,11 +232,11 @@ */ protected final int capacity; - /** - * When true only 
distinct terms are stored in the buffer (this is always - * true since this condition always outperforms the alternative). - */ - protected final boolean distinct = true; +// /** +// * When true only distinct terms are stored in the buffer (this is always +// * true since this condition always outperforms the alternative). +// */ +// protected final boolean distinct = true; public boolean isEmpty() { @@ -347,30 +347,22 @@ this.capacity = capacity; - values = new BigdataValue[capacity * arity]; + values = new BigdataValue[capacity * arity + 5]; stmts = new BigdataStatement[capacity]; - if (distinct) { - - /* - * initialize capacity to N times the #of statements allowed. this - * is the maximum #of distinct terms and would only be realized if - * each statement used distinct values. in practice the #of distinct - * terms will be much lower. however, also note that the map will be - * resized at .75 of the capacity so we want to over-estimate the - * maximum likely capacity by at least 25% to avoid re-building the - * hash map. - */ - - distinctTermMap = new HashMap<Value, BigdataValue>(capacity * arity); - - } else { - - distinctTermMap = null; - - } + /* + * initialize capacity to N times the #of statements allowed. this + * is the maximum #of distinct terms and would only be realized if + * each statement used distinct values. in practice the #of distinct + * terms will be much lower. however, also note that the map will be + * resized at .75 of the capacity so we want to over-estimate the + * maximum likely capacity by at least 25% to avoid re-building the + * hash map. + */ + distinctTermMap = new HashMap<Value, BigdataValue>(capacity * arity); + this.statementIdentifiers = database.getStatementIdentifiers(); if(log.isInfoEnabled()) { @@ -387,18 +379,14 @@ this.RDF_STATEMENT = valueFactory.asValue(RDF.STATEMENT); this.RDF_TYPE = valueFactory.asValue(RDF.TYPE); - if (distinct) { - - /* - * Get the reification vocabulary into the distinct term map. 
- */ - getDistinctTerm(RDF_SUBJECT); - getDistinctTerm(RDF_PREDICATE); - getDistinctTerm(RDF_OBJECT); - getDistinctTerm(RDF_STATEMENT); - getDistinctTerm(RDF_TYPE); - - } + /* + * Get the reification vocabulary into the distinct term map. + */ + getDistinctTerm(RDF_SUBJECT, true); + getDistinctTerm(RDF_PREDICATE, true); + getDistinctTerm(RDF_OBJECT, true); + getDistinctTerm(RDF_STATEMENT, true); + getDistinctTerm(RDF_TYPE, true); } @@ -415,8 +403,6 @@ */ public long flush() { - log.info(""); - /* * Process deferred statements (NOP unless using statement identifiers). */ @@ -552,11 +538,7 @@ while(itr.hasNext()) { final BigdataStatement stmt = itr.next(); - - if (log.isDebugEnabled()) { - log.debug(stmt.getSubject() + ", sid=" + ((BigdataBNode) stmt.getSubject()).isStatementIdentifier() + ", iv=" + stmt.s()); - } - + if (stmt.getSubject() instanceof BNode && ((BigdataBNode) stmt.getSubject()).isStatementIdentifier()) continue; @@ -569,6 +551,12 @@ log.debug("grounded: "+stmt); } + if (stmt.getSubject() instanceof BNode) + addTerm(stmt.getSubject()); + + if (stmt.getObject() instanceof BNode) + addTerm(stmt.getObject()); + // fully grounded so add to the buffer. add(stmt); @@ -703,8 +691,6 @@ */ public void reset() { - log.info(""); - _clear(); /* @@ -763,6 +749,15 @@ if (distinctTermMap != null) { distinctTermMap.clear(); + + /* + * Get the reification vocabulary into the distinct term map. 
+ */ + getDistinctTerm(RDF_SUBJECT, true); + getDistinctTerm(RDF_PREDICATE, true); + getDistinctTerm(RDF_OBJECT, true); + getDistinctTerm(RDF_STATEMENT, true); + getDistinctTerm(RDF_TYPE, true); } @@ -775,6 +770,24 @@ */ protected void incrementalWrite() { + if (bnodes != null) { + + for (BigdataBNode bnode : bnodes.values()) { + + if (bnode.isStatementIdentifier()) + continue; + + if (bnode.getIV() != null) + continue; + + values[numValues++] = bnode; + + numBNodes++; + + } + + } + final long begin = System.currentTimeMillis(); if (log.isInfoEnabled()) { @@ -1177,9 +1190,10 @@ * @return Either the term or the pre-existing term in the buffer with the * same data. */ - protected BigdataValue getDistinctTerm(final BigdataValue term) { + protected BigdataValue getDistinctTerm(final BigdataValue term, final boolean addIfAbsent) { - assert distinct == true; + if (term == null) + return null; if (term instanceof BNode) { @@ -1193,65 +1207,125 @@ final BigdataBNode bnode = (BigdataBNode)term; - // the BNode's ID. - final String id = bnode.getID(); - - if (bnodes == null) { - - // allocating canonicalizing map for blank nodes. - bnodes = new HashMap<String, BigdataBNode>(capacity); - - // insert this blank node into the map. - bnodes.put(id, bnode); - + final BigdataStatement stmt = bnode.getStatement(); + + if (stmt != null) { + +// /* +// * Assume for now that bnodes appearing inside the terse +// * syntax without a statement attached are real bnodes, not +// * sids. +// */ +// final boolean tmp = this.statementIdentifiers; +// this.statementIdentifiers = false; + + bnode.setStatement(valueFactory.createStatement( + (BigdataResource) getDistinctTerm(stmt.getSubject(), true), + (BigdataURI) getDistinctTerm(stmt.getPredicate(), true), + (BigdataValue) getDistinctTerm(stmt.getObject(), true) + )); + +// this.statementIdentifiers = tmp; + + /* + * Do not "add if absent". This is not a real term, just a + * composition of other terms. 
+ */ + return bnode; + } else { - - // test canonicalizing map for blank nodes. - final BigdataBNode existingBNode = bnodes.get(id); - - if (existingBNode != null) { - - // return existing blank node with same ID. - return existingBNode; - - } - - // insert this blank node into the map. - bnodes.put(id, bnode); - + + // the BNode's ID. + final String id = bnode.getID(); + + if (bnodes == null) { + + // allocating canonicalizing map for blank nodes. + bnodes = new HashMap<String, BigdataBNode>(capacity); + + // insert this blank node into the map. + bnodes.put(id, bnode); + + } else { + + // test canonicalizing map for blank nodes. + final BigdataBNode existingBNode = bnodes.get(id); + + if (existingBNode != null) { + + /* + * Return existing blank node with same ID, do not + * add since not absent. + */ + return existingBNode; + + } + + // insert this blank node into the map. + bnodes.put(id, bnode); + + } + } - return term; +// return term; - } + } else { - /* - * Other kinds of terms use a map whose scope is limited to the terms - * that are currently in the buffer. This keeps down the heap demand - * when reading very large documents. - */ - - final BigdataValue existingTerm = distinctTermMap.get(term); - - if(existingTerm != null) { - - // return the pre-existing term. - + /* + * Other kinds of terms use a map whose scope is limited to the terms + * that are currently in the buffer. This keeps down the heap demand + * when reading very large documents. + */ + + final BigdataValue existingTerm = distinctTermMap.get(term); + + if (existingTerm != null) { + + // return the pre-existing term. + + if(log.isDebugEnabled()) { + + log.debug("duplicate: "+term); + + } + + if (equals(existingTerm, RDF_SUBJECT, RDF_PREDICATE, RDF_OBJECT, RDF_TYPE, RDF_STATEMENT)) { + + if (addIfAbsent) { + + addTerm(term); + + } + + } + + /* + * Term already exists, do not add. 
+ */ + return existingTerm; + + } + if(log.isDebugEnabled()) { - log.debug("duplicate: "+term); + log.debug("new term: "+term); } - return existingTerm; - + // put the new term in the map. + if (distinctTermMap.put(term, term) != null) { + + throw new AssertionError(); + + } + } - - // put the new term in the map. - if (distinctTermMap.put(term, term) != null) { - - throw new AssertionError(); - + + if (addIfAbsent) { + + addTerm(term); + } // return the new term. @@ -1259,6 +1333,37 @@ } + protected void addTerm(final BigdataValue term) { + + if (term == null) + return; + + if (term instanceof URI) { + + numURIs++; + + values[numValues++] = term; + + } else if (term instanceof BNode) { + +// if (!statementIdentifiers) { +// +// numBNodes++; +// +// values[numValues++] = term; +// +// } + + } else { + + numLiterals++; + + values[numValues++] = term; + + } + + } + /** * Adds the values and the statement into the buffer. * @@ -1278,244 +1383,126 @@ * * @see #nearCapacity() */ - protected void handleStatement(Resource s, URI p, Value o, Resource c, + protected void handleStatement(Resource _s, URI _p, Value _o, Resource _c, StatementEnum type) { + if (log.isDebugEnabled()) { + + log.debug("handle stmt: " + _s + ", " + _p + ", " + _o + ", " + _c); + + } + // if (arity == 3) c = null; - s = (Resource) valueFactory.asValue(s); - p = (URI) valueFactory.asValue(p); - o = valueFactory.asValue(o); - c = (Resource) valueFactory.asValue(c); + final BigdataResource s = (BigdataResource) + getDistinctTerm(valueFactory.asValue(_s), true); + final BigdataURI p = (BigdataURI) + getDistinctTerm(valueFactory.asValue(_p), true); + final BigdataValue o = + getDistinctTerm(valueFactory.asValue(_o), true); + final BigdataResource c = (BigdataResource) + getDistinctTerm(valueFactory.asValue(_c), true); - boolean duplicateS = false; - boolean duplicateP = false; - boolean duplicateO = false; - boolean duplicateC = false; - - if (distinct) { - { - final BigdataValue tmp = 
getDistinctTerm((BigdataValue) s); - if (tmp != s && !equals(tmp, RDF_SUBJECT, RDF_PREDICATE, RDF_OBJECT, RDF_TYPE, RDF_STATEMENT)) { - duplicateS = true; - } - s = (Resource) tmp; - } - { - final BigdataValue tmp = getDistinctTerm((BigdataValue) p); - if (tmp != p && !equals(tmp, RDF_SUBJECT, RDF_PREDICATE, RDF_OBJECT, RDF_TYPE)) { - duplicateP = true; - } - p = (URI) tmp; - } - { - final BigdataValue tmp = getDistinctTerm((BigdataValue) o); - if (tmp != o && !equals(tmp, RDF_SUBJECT, RDF_PREDICATE, RDF_OBJECT, RDF_TYPE, RDF_STATEMENT)) { - duplicateO = true; - } - o = (Value) tmp; - } - if (c != null) { - final BigdataValue tmp = getDistinctTerm((BigdataValue) c); - if (tmp != c) { - duplicateC = true; - } - c = (Resource) tmp; - } - } - /* * Form the BigdataStatement object now that we have the bindings. */ - final BigdataStatement stmt; - { - - stmt = valueFactory - .createStatement((BigdataResource) s, (BigdataURI) p, - (BigdataValue) o, (BigdataResource) c, type); + final BigdataStatement stmt = valueFactory.createStatement(s, p, o, c, type); - if (statementIdentifiers - && (s instanceof BNode || o instanceof BNode)) { + if (statementIdentifiers + && ((s instanceof BNode && ((BigdataBNode) s).getStatement() == null) +// || +// (o instanceof BNode && ((BigdataBNode) o).getStatement() == null) + )) { - /* - * When statement identifiers are enabled a statement with a - * blank node in the subject or object position must be deferred - * until the end of the source so that we determine whether it - * is being used as a statement identifier or a blank node (if - * the blank node occurs in the context position, then we know - * that it is being used as a statement identifier). 
- */ + /* + * When statement identifiers are enabled a statement with a + * blank node in the subject or object position must be deferred + * until the end of the source so that we determine whether it + * is being used as a statement identifier or a blank node (if + * the blank node occurs in the context position, then we know + * that it is being used as a statement identifier). + */ + + if (//s instanceof BNode && + equals(p, RDF_SUBJECT, RDF_PREDICATE, RDF_OBJECT)) { + + final BigdataBNodeImpl sid = (BigdataBNodeImpl) s; + + if (reifiedStmts == null) { + + reifiedStmts = new HashMap<BigdataBNodeImpl, ReifiedStmt>(); + + } + + final ReifiedStmt reifiedStmt; + if (reifiedStmts.containsKey(sid)) { + + reifiedStmt = reifiedStmts.get(sid); + + } else { + + reifiedStmt = new ReifiedStmt(); + + reifiedStmts.put(sid, reifiedStmt); + + } + + reifiedStmt.set(p, (BigdataValue) o); + + if (log.isDebugEnabled()) + log.debug("reified piece: "+stmt); - log.info(stmt); - - if (s instanceof BNode && - equals((BigdataValue)p, RDF_SUBJECT, RDF_PREDICATE, RDF_OBJECT)) { - - final BigdataBNodeImpl sid = (BigdataBNodeImpl) s; - - if (reifiedStmts == null) { - - reifiedStmts = new HashMap<BigdataBNodeImpl, ReifiedStmt>(); - - } - - final ReifiedStmt reifiedStmt; - if (reifiedStmts.containsKey(sid)) { - - reifiedStmt = reifiedStmts.get(sid); - - } else { - - reifiedStmt = new ReifiedStmt(); - - reifiedStmts.put(sid, reifiedStmt); - - } - - reifiedStmt.set(p, (BigdataValue) o); - - if (log.isDebugEnabled()) - log.debug("reified piece: "+stmt); - - } else if (s instanceof BNode && - equals((BigdataValue)o, RDF_STATEMENT) && equals((BigdataValue)p, RDF_TYPE)) { - - // ignore this statement - - } else { - - if (deferredStmts == null) { - - deferredStmts = new HashSet<BigdataStatement>(stmts.length); - - } - - deferredStmts.add(stmt); - - if (log.isDebugEnabled()) - log.debug("deferred: "+stmt); - - } + if (reifiedStmt.isFullyBound(arity)) { + + 
sid.setStatement(reifiedStmt.toStatement(valueFactory)); + + reifiedStmts.remove(sid); + + } - } else { + return; - // add to the buffer. - stmts[numStmts++] = stmt; + } +// else { +// +// if (deferredStmts == null) { +// +// deferredStmts = new HashSet<BigdataStatement>(stmts.length); +// +// } +// +// deferredStmts.add(stmt); +// +// if (log.isDebugEnabled()) +// log.debug("deferred: "+stmt); +// +// } +// +// } else { - } - } - - /* - * Update counters. - */ - if (!duplicateS) {// && ((_Value) s).termId == 0L) { + if (statementIdentifiers && s instanceof BNode && + equals(o, RDF_STATEMENT) && equals(p, RDF_TYPE)) { + + // ignore this statement + + return; + + } + + // add to the buffer. + stmts[numStmts++] = stmt; - if (s instanceof URI) { +// } - numURIs++; - - values[numValues++] = (BigdataValue) s; - - } else { - - if (!statementIdentifiers) { - - numBNodes++; - - values[numValues++] = (BigdataValue) s; - - } - - } - + if (c != null && statementIdentifiers && c instanceof BNode) { + + ((BigdataBNodeImpl) c).setStatement(stmt); + } - if (!duplicateP) {//&& ((_Value) s).termId == 0L) { - - values[numValues++] = (BigdataValue)p; - - numURIs++; - - } - - if (!duplicateO) {// && ((_Value) s).termId == 0L) { - - if (o instanceof URI) { - - numURIs++; - - values[numValues++] = (BigdataValue) o; - - } else if (o instanceof BNode) { - - if (!statementIdentifiers) { - - numBNodes++; - - values[numValues++] = (BigdataValue) o; - - } - - } else { - - numLiterals++; - - values[numValues++] = (BigdataValue) o; - - } - - } - - if (c != null && !duplicateC && ((BigdataValue) c).getIV() == null) { - - if (c instanceof URI) { - - numURIs++; - - values[numValues++] = (BigdataValue) c; - - } else { - - if (!database.getStatementIdentifiers()) { - - /* - * We only let the context node into the buffer when - * statement identifiers are disabled for the database. 
- * - * Note: This does NOT test [statementIdentifiers] as that - * flag is temporarily overriden when processing deferred - * statements. - */ - - values[numValues++] = (BigdataValue) c; - - numBNodes++; - - } else { - - /* - * Flag the blank node as a statement identifier since it - * appears in the context position. - * - * Note: the blank node is not inserted into values[] since - * it is a statement identifier and will be assigned when we - * insert the statement rather than based on the blank - * node's ID. - */ - - // Note: done automatically by setStatement(); -// ((BigdataBNode) c).setStatementIdentifier( true); - ((BigdataBNodeImpl) c).setStatement(stmt); - - } - - } - - } - } private boolean equals(final BigdataValue v1, final BigdataValue... v2) { @@ -1541,15 +1528,17 @@ private boolean _equals(final BigdataValue v1, final BigdataValue v2) { - if (distinct) { - - return v1 == v2; - - } else { - - return v1.equals(v2); - - } + return v1 == v2; + +// if (distinct) { +// +// return v1 == v2; +// +// } else { +// +// return v1.equals(v2); +// +// } } @@ -1639,6 +1628,12 @@ return "<" + s + ", " + p + ", " + o + ", " + c + ">"; } + + public BigdataStatement toStatement(final BigdataValueFactory vf) { + + return vf.createStatement(s, p, o, c); + + } } Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/ntriples/BigdataNTriplesParser.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/ntriples/BigdataNTriplesParser.java 2014-01-24 23:39:36 UTC (rev 7828) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/rio/ntriples/BigdataNTriplesParser.java 2014-01-26 20:37:25 UTC (rev 7829) @@ -421,31 +421,35 @@ // Create statement. BigdataStatement st = (BigdataStatement) createStatement( state.subject, state.predicate, state.object); - // Resolve against LRU map to blank node for statement. 
- BigdataBNode sid = sids.get(st); - if (sid != null) { - state.lastSID = sid; - } else { - /* - * Not found. - * - * TODO The use of the sid bnode in the context position should - * go away when we migrate to sids support in both triples and - * quads mode. - */ - // New blank node for "sid" of this statement. - state.lastSID = sid = (BigdataBNode) createBNode(); - // New statement using that "sid" as its context position. - st = getValueFactory().createStatement(state.subject, - state.predicate, state.object, sid); - // cache it. - sids.put(st,sid); - // mark this blank node as a "sid". - // st.setStatementIdentifier(true); - ((BigdataBNodeImpl) sid).setStatement(st); - // new statement so pass to the call back interface. - rdfHandler.handleStatement(st); - } + + state.lastSID = ((BigdataValueFactory) valueFactory).createBNode(st); + +// // Resolve against LRU map to blank node for statement. +// BigdataBNode sid = sids.get(st); +// if (sid != null) { +// state.lastSID = sid; +// } else { +// /* +// * Not found. +// * +// * TODO The use of the sid bnode in the context position should +// * go away when we migrate to sids support in both triples and +// * quads mode. +// */ +// // New blank node for "sid" of this statement. +// state.lastSID = sid = (BigdataBNode) createBNode(); +// // New statement using that "sid" as its context position. +// st = getValueFactory().createStatement(state.subject, +// state.predicate, state.object, sid); +// // cache it. +// sids.put(st,sid); +// // mark this blank node as a "sid". +// // st.setStatementIdentifier(true); +// ((BigdataBNodeImpl) sid).setStatement(st); +// // new statement so pass to the call back interface. +// rdfHandler.handleStatement(st); +// } + ... [truncated message content] |