From: <tho...@us...> - 2011-07-15 13:37:50
|
Revision: 4921 http://bigdata.svn.sourceforge.net/bigdata/?rev=4921&view=rev Author: thompsonbry Date: 2011-07-15 13:37:44 +0000 (Fri, 15 Jul 2011) Log Message: ----------- Updated the 1.0.1 release notes to reflect the inclusion of support for OSX performance counters. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt 2011-07-15 13:33:05 UTC (rev 4920) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt 2011-07-15 13:37:44 UTC (rev 4921) @@ -45,6 +45,9 @@ - https://sourceforge.net/apps/trac/bigdata/ticket/351 (SPOs not serializable in scale-out). + + - https://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized + performance counter collection classes New features in 1.0.x release: Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt 2011-07-15 13:33:05 UTC (rev 4920) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt 2011-07-15 13:37:44 UTC (rev 4921) @@ -46,6 +46,9 @@ - https://sourceforge.net/apps/trac/bigdata/ticket/351 (SPOs not serializable in scale-out). + - https://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized + performance counter collection classes + New features in 1.0.x release: - Single machine data storage to ~50B triples/quads (RWStore); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-07-15 14:29:33
|
Revision: 4924 http://bigdata.svn.sourceforge.net/bigdata/?rev=4924&view=rev Author: thompsonbry Date: 2011-07-15 14:29:27 +0000 (Fri, 15 Jul 2011) Log Message: ----------- Removed synchronized on Banner.banner() using an AtomicBoolean. This works around a possible hot spot when large numbers of B+Tree indices are created concurrently, for example, with high concurrent transaction workloads. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/Banner.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java 2011-07-15 14:05:20 UTC (rev 4923) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java 2011-07-15 14:29:27 UTC (rev 4924) @@ -30,6 +30,7 @@ import java.lang.reflect.Method; import java.util.Date; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -47,7 +48,7 @@ */ public class Banner { - private static boolean didBanner; + private final static AtomicBoolean didBanner = new AtomicBoolean(false); /** * Environment variables understood by the {@link Banner} class. 
@@ -72,7 +73,7 @@ synchronized static public void banner() { - if(!didBanner) { + if(didBanner.compareAndSet(false/*expect*/, true/*update*/)) { final boolean quiet = Boolean.getBoolean(Options.QUIET); @@ -82,8 +83,6 @@ } - didBanner = true; - final Logger log = Logger.getLogger("com.bigdata"); if (log.getLevel() == null) { Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/Banner.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/Banner.java 2011-07-15 14:05:20 UTC (rev 4923) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/Banner.java 2011-07-15 14:29:27 UTC (rev 4924) @@ -30,6 +30,7 @@ import java.lang.reflect.Method; import java.util.Date; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -47,7 +48,7 @@ */ public class Banner { - private static boolean didBanner; + private final static AtomicBoolean didBanner = new AtomicBoolean(false); /** * Environment variables understood by the {@link Banner} class. @@ -70,9 +71,9 @@ } - synchronized static public void banner() { + static public void banner() { - if(!didBanner) { + if(didBanner.compareAndSet(false/*expect*/, true/*update*/)) { final boolean quiet = Boolean.getBoolean(Options.QUIET); @@ -81,8 +82,6 @@ System.out.println(banner); } - - didBanner = true; final Logger log = Logger.getLogger("com.bigdata"); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-07-18 12:29:27
|
Revision: 4945 http://bigdata.svn.sourceforge.net/bigdata/?rev=4945&view=rev Author: thompsonbry Date: 2011-07-18 12:29:21 +0000 (Mon, 18 Jul 2011) Log Message: ----------- Added test case for tickets 352, 355, and 356 to CI in the 1.0.0 release branch and the development branch. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket355.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket352.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket355.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-07-17 11:18:58 UTC (rev 4944) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-07-18 12:29:21 UTC (rev 4945) @@ -120,6 +120,8 @@ suite.addTestSuite(com.bigdata.rdf.sail.TestTicket275.class); suite.addTestSuite(com.bigdata.rdf.sail.TestTicket276.class); suite.addTestSuite(com.bigdata.rdf.sail.TestTicket348.class); + suite.addTestSuite(com.bigdata.rdf.sail.TestTicket352.class); + suite.addTestSuite(com.bigdata.rdf.sail.TestTicket355.class); suite.addTestSuite(com.bigdata.rdf.sail.DavidsTestBOps.class); Added: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket355.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket355.java (rev 0) +++ 
branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket355.java 2011-07-18 12:29:21 UTC (rev 4945) @@ -0,0 +1,148 @@ +/** +Copyright (C) SYSTAP, LLC 2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +package com.bigdata.rdf.sail; + +import java.io.IOException; +import java.util.Properties; + +import org.openrdf.model.ValueFactory; +import org.openrdf.query.MalformedQueryException; +import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.QueryLanguage; +import org.openrdf.query.TupleQuery; +import org.openrdf.query.TupleQueryResult; +import org.openrdf.repository.RepositoryConnection; +import org.openrdf.repository.RepositoryException; +import org.openrdf.repository.sail.SailRepository; +import org.openrdf.rio.RDFHandlerException; +import org.openrdf.rio.RDFParseException; +import org.openrdf.sail.memory.MemoryStore; + +import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.vocab.NoVocabulary; + +/** + * Unit test template for use in submission of bugs. + * <p> + * This test case will delegate to an underlying backing store. 
You can specify + * this store via a JVM property as follows: + * <code>-DtestClass=com.bigdata.rdf.sail.TestBigdataSailWithQuads</code> + * <p> + * There are three possible configurations for the testClass: + * <ul> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithQuads (quads mode)</li> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithoutSids (triples mode)</li> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithSids (SIDs mode)</li> + * </ul> + * <p> + * The default for triples and SIDs mode is for inference with truth maintenance + * to be on. If you would like to turn off inference, make sure to do so in + * {@link #getProperties()}. + * + * @author <a href="mailto:mrp...@us...">Mike Personick</a> + * @version $Id: TestTicket276.java 4613 2011-06-03 11:35:18Z thompsonbry $ + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/355 + * @see https://sourceforge.net/apps/trac/bigdata/ticket/356 + */ +public class TestTicket355 extends QuadsTestCase { + + public TestTicket355() { + } + + public TestTicket355(String arg0) { + super(arg0); + } + + /** + * Please set your database properties here, except for your journal file, + * please DO NOT SPECIFY A JOURNAL FILE. + */ + @Override + public Properties getProperties() { + + final Properties props = super.getProperties(); + + /* + * For example, here is a set of five properties that turns off + * inference, truth maintenance, and the free text index. 
+ */ + props.setProperty(BigdataSail.Options.AXIOMS_CLASS, + NoAxioms.class.getName()); + props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, + NoVocabulary.class.getName()); + props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false"); + props.setProperty(BigdataSail.Options.JUSTIFY, "false"); + props.setProperty(BigdataSail.Options.INLINE_DATE_TIMES, "true"); + props.setProperty(BigdataSail.Options.ISOLATABLE_INDICES, "true"); + props.setProperty(BigdataSail.Options.EXACT_SIZE, "true"); + props.setProperty(BigdataSail.Options.ALLOW_SESAME_QUERY_EVALUATION, + "false"); + props.setProperty(BigdataSail.Options.STATEMENT_IDENTIFIERS, "false"); + + return props; + + } + + public void testBug() throws Exception { + // try with Sesame MemoryStore: + executeQuery(new SailRepository(new MemoryStore())); + + // try with Bigdata: + final BigdataSail sail = getSail(); + try { + executeQuery(new SailRepository(sail)); + } finally { + sail.__tearDownUnitTest(); + } + + } + + private void executeQuery(final SailRepository repo) + throws RepositoryException, MalformedQueryException, + QueryEvaluationException, RDFParseException, IOException, + RDFHandlerException { + try { + repo.initialize(); + final RepositoryConnection conn = repo.getConnection(); + try { + final ValueFactory vf = conn.getValueFactory(); + conn.add(vf.createURI("os:subject"), vf.createURI("os:prop"), vf.createLiteral("value")); + + String query = "SELECT ?subj WHERE { " + + "?subj <os:prop> ?val . 
" + + "FILTER(STR(?val) != ?arg)}"; + TupleQuery tq = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); + tq.setBinding("arg", vf.createLiteral("notValue")); + TupleQueryResult tqr = tq.evaluate(); + assertTrue(tqr.hasNext()); + tqr.close(); + } finally { + conn.close(); + } + } finally { + repo.shutDown(); + } + } +} Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-07-17 11:18:58 UTC (rev 4944) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-07-18 12:29:21 UTC (rev 4945) @@ -120,6 +120,8 @@ suite.addTestSuite(com.bigdata.rdf.sail.TestTicket275.class); suite.addTestSuite(com.bigdata.rdf.sail.TestTicket276.class); suite.addTestSuite(com.bigdata.rdf.sail.TestTicket348.class); + suite.addTestSuite(com.bigdata.rdf.sail.TestTicket352.class); + suite.addTestSuite(com.bigdata.rdf.sail.TestTicket355.class); suite.addTestSuite(com.bigdata.rdf.sail.DavidsTestBOps.class); Added: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket352.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket352.java (rev 0) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket352.java 2011-07-18 12:29:21 UTC (rev 4945) @@ -0,0 +1,196 @@ +/** +Copyright (C) SYSTAP, LLC 2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +package com.bigdata.rdf.sail; + +import info.aduna.iteration.CloseableIteration; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; + +import org.openrdf.model.impl.LiteralImpl; +import org.openrdf.model.impl.URIImpl; +import org.openrdf.query.BindingSet; +import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.algebra.Projection; +import org.openrdf.query.algebra.ProjectionElem; +import org.openrdf.query.algebra.ProjectionElemList; +import org.openrdf.query.algebra.StatementPattern; +import org.openrdf.query.algebra.TupleExpr; +import org.openrdf.query.algebra.Var; +import org.openrdf.query.algebra.evaluation.QueryBindingSet; +import org.openrdf.sail.SailException; + +import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; + +/** + * Unit test template for use in submission of bugs. + * <p> + * This test case will delegate to an underlying backing store. You can specify + * this store via a JVM property as follows: + * <code>-DtestClass=com.bigdata.rdf.sail.TestBigdataSailWithQuads</code> + * <p> + * There are three possible configurations for the testClass: + * <ul> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithQuads (quads mode)</li> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithoutSids (triples mode)</li> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithSids (SIDs mode)</li> + * </ul> + * <p> + * The default for triples and SIDs mode is for inference with truth maintenance + * to be on. 
If you would like to turn off inference, make sure to do so in + * {@link #getProperties()}. + * + * @author <a href="mailto:mrp...@us...">Mike Personick</a> + * @version $Id$ + */ +public class TestTicket352 extends QuadsTestCase { + + public TestTicket352() { + } + + public TestTicket352(String arg0) { + super(arg0); + } + + public void testEvaluate() throws Exception { + final BigdataSail sail = getSail(); + try { + sail.initialize(); + final BigdataSailConnection conn = sail.getConnection(); + try { + conn.addStatement(new URIImpl("s:1"), new URIImpl("p:1"), + new LiteralImpl("l1")); + conn.addStatement(new URIImpl("s:2"), new URIImpl("p:2"), + new URIImpl("o:2")); + conn.addStatement(new URIImpl("s:3"), new URIImpl("p:3"), + new LiteralImpl("l3")); + if(log.isInfoEnabled()) + log.info("try query with BindingSet evaluate:"); + query(conn, false); + if(log.isInfoEnabled()) + log.info("try query with BindingSet stream evaluate:"); + query(conn, true); + } finally { + conn.close(); + } + } finally { + sail.shutDown(); + getSail().__tearDownUnitTest(); + } + } + + private void query(final BigdataSailConnection conn, + final boolean useIteration) throws SailException, + QueryEvaluationException { + + final ProjectionElemList elemList = new ProjectionElemList( + new ProjectionElem("z")); + + final TupleExpr query = new Projection(new StatementPattern( + new Var("s"), new Var("p"), new Var("o")), elemList); + + final QueryBindingSet bindings = mb("z", "u:is_not_in_database"); + + final CloseableIteration<? extends BindingSet, QueryEvaluationException> results; + + if (useIteration) { + + // the caller is providing a set of solutions as inputs. + final QueryBindingSet emptyQueryBindingSet = new QueryBindingSet(); + + results = conn.evaluate(query, null, emptyQueryBindingSet, + new Iter(bindings), false, null); + } else { + + // w/o the caller providing a set of solutions as inputs. 
+ results = conn.evaluate(query, null, bindings, false); + + } + + while (results.hasNext()) { + + final BindingSet bset = results.next(); + + if (log.isInfoEnabled()) + log.info(bset.toString()); + + } + + results.close(); + + } + + /** + * Makes a binding set by taking each pair of values and using the first + * value as name and the second as value. Creates an URI for a value with a + * ':' in it, or a Literal for a value without a ':'. + */ + private QueryBindingSet mb(final String... nameValuePairs) { + + final QueryBindingSet bs = new QueryBindingSet(); + + for (int i = 0; i < nameValuePairs.length; i += 2) + bs.addBinding(nameValuePairs[i], + nameValuePairs[i + 1].indexOf(':') > 0 ? new URIImpl( + nameValuePairs[i + 1]) : new LiteralImpl( + nameValuePairs[i + 1])); + + return bs; + + } + + /** + * Iterates over the given bindings. + */ + private static class Iter implements + CloseableIteration<BindingSet, QueryEvaluationException> { + + final private Iterator<BindingSet> iter; + + private Iter(Collection<BindingSet> bindings) { + this.iter = bindings.iterator(); + } + + private Iter(BindingSet... bindings) { + this(Arrays.asList(bindings)); + } + + public boolean hasNext() throws QueryEvaluationException { + return iter.hasNext(); + } + + public BindingSet next() throws QueryEvaluationException { + return iter.next(); + } + + public void remove() throws QueryEvaluationException { + iter.remove(); + } + + public void close() throws QueryEvaluationException { + } + } + +} \ No newline at end of file Added: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket355.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket355.java (rev 0) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket355.java 2011-07-18 12:29:21 UTC (rev 4945) @@ -0,0 +1,148 @@ +/** +Copyright (C) SYSTAP, LLC 2011. 
All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +package com.bigdata.rdf.sail; + +import java.io.IOException; +import java.util.Properties; + +import org.openrdf.model.ValueFactory; +import org.openrdf.query.MalformedQueryException; +import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.QueryLanguage; +import org.openrdf.query.TupleQuery; +import org.openrdf.query.TupleQueryResult; +import org.openrdf.repository.RepositoryConnection; +import org.openrdf.repository.RepositoryException; +import org.openrdf.repository.sail.SailRepository; +import org.openrdf.rio.RDFHandlerException; +import org.openrdf.rio.RDFParseException; +import org.openrdf.sail.memory.MemoryStore; + +import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.vocab.NoVocabulary; + +/** + * Unit test template for use in submission of bugs. + * <p> + * This test case will delegate to an underlying backing store. 
You can specify + * this store via a JVM property as follows: + * <code>-DtestClass=com.bigdata.rdf.sail.TestBigdataSailWithQuads</code> + * <p> + * There are three possible configurations for the testClass: + * <ul> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithQuads (quads mode)</li> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithoutSids (triples mode)</li> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithSids (SIDs mode)</li> + * </ul> + * <p> + * The default for triples and SIDs mode is for inference with truth maintenance + * to be on. If you would like to turn off inference, make sure to do so in + * {@link #getProperties()}. + * + * @author <a href="mailto:mrp...@us...">Mike Personick</a> + * @version $Id: TestTicket276.java 4613 2011-06-03 11:35:18Z thompsonbry $ + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/355 + * @see https://sourceforge.net/apps/trac/bigdata/ticket/356 + */ +public class TestTicket355 extends QuadsTestCase { + + public TestTicket355() { + } + + public TestTicket355(String arg0) { + super(arg0); + } + + /** + * Please set your database properties here, except for your journal file, + * please DO NOT SPECIFY A JOURNAL FILE. + */ + @Override + public Properties getProperties() { + + final Properties props = super.getProperties(); + + /* + * For example, here is a set of five properties that turns off + * inference, truth maintenance, and the free text index. 
+ */ + props.setProperty(BigdataSail.Options.AXIOMS_CLASS, + NoAxioms.class.getName()); + props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, + NoVocabulary.class.getName()); + props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false"); + props.setProperty(BigdataSail.Options.JUSTIFY, "false"); + props.setProperty(BigdataSail.Options.INLINE_DATE_TIMES, "true"); + props.setProperty(BigdataSail.Options.ISOLATABLE_INDICES, "true"); + props.setProperty(BigdataSail.Options.EXACT_SIZE, "true"); + props.setProperty(BigdataSail.Options.ALLOW_SESAME_QUERY_EVALUATION, + "false"); + props.setProperty(BigdataSail.Options.STATEMENT_IDENTIFIERS, "false"); + + return props; + + } + + public void testBug() throws Exception { + // try with Sesame MemoryStore: + executeQuery(new SailRepository(new MemoryStore())); + + // try with Bigdata: + final BigdataSail sail = getSail(); + try { + executeQuery(new SailRepository(sail)); + } finally { + sail.__tearDownUnitTest(); + } + + } + + private void executeQuery(final SailRepository repo) + throws RepositoryException, MalformedQueryException, + QueryEvaluationException, RDFParseException, IOException, + RDFHandlerException { + try { + repo.initialize(); + final RepositoryConnection conn = repo.getConnection(); + try { + final ValueFactory vf = conn.getValueFactory(); + conn.add(vf.createURI("os:subject"), vf.createURI("os:prop"), vf.createLiteral("value")); + + String query = "SELECT ?subj WHERE { " + + "?subj <os:prop> ?val . " + + "FILTER(STR(?val) != ?arg)}"; + TupleQuery tq = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); + tq.setBinding("arg", vf.createLiteral("notValue")); + TupleQueryResult tqr = tq.evaluate(); + assertTrue(tqr.hasNext()); + tqr.close(); + } finally { + conn.close(); + } + } finally { + repo.shutDown(); + } + } +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-07-25 16:09:05
|
Revision: 4966 http://bigdata.svn.sourceforge.net/bigdata/?rev=4966&view=rev Author: thompsonbry Date: 2011-07-25 16:08:58 +0000 (Mon, 25 Jul 2011) Log Message: ----------- Updated release notes for 1.0.1 to reflect close of additional tickets. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt 2011-07-25 13:25:17 UTC (rev 4965) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt 2011-07-25 16:08:58 UTC (rev 4966) @@ -1,91 +1,100 @@ -This is a bigdata (R) release. This release is capable of loading 1B triples in -under one hour on a 15 node cluster. JDK 1.6 is required. - -Bigdata(R) is a horizontally scaled open source architecture for indexed data -with an emphasis on semantic web data architectures. Bigdata operates in both -a single machine mode (Journal) and a cluster mode (Federation). The Journal -provides fast scalable ACID indexed storage for very large data sets. The -federation provides fast scalable shard-wise parallel indexed storage using -dynamic sharding and shard-wise ACID updates. Both platforms support fully -concurrent readers with snapshot isolation. - -Distributed processing offers greater throughput but does not reduce query or -update latency. Choose the Journal when the anticipated scale and throughput -requirements permit. Choose the Federation when the administrative and machine -overhead associated with operating a cluster is an acceptable tradeoff to have -essentially unlimited data scaling and throughput. - -See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and -[3,5,6] for news, questions, and the latest developments. 
For more information -about SYSTAP, LLC and bigdata, see [7]. - -Starting with this release, we offer a WAR artifact [8] for easy installation of -the Journal mode database. For custom development and cluster installations we -recommend checking out the code from SVN using the tag for this release. The -code will build automatically under eclipse. You can also build the code using -the ant script. The cluster installer requires the use of the ant script. - -You can checkout this release from the following URL: - -https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_0_1 - -Bug fixes: - - - https://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits - Journal to 2B distinct RDF Values per triple/quad store instance). - - - https://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should - use more bits for scale-out). - - - https://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema - names in the sparse row store). - - - https://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() - must return new instance when DummyIV is used). - - - https://sourceforge.net/apps/trac/bigdata/ticket/351 (SPOs not serializable - in scale-out). - - - https://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized - performance counter collection classes - -New features in 1.0.x release: - -- Single machine data storage to ~50B triples/quads (RWStore); -- Simple embedded and/or webapp deployment (NanoSparqlServer); -- 100% native SPARQL 1.0 evaluation with lots of query optimizations; - -Feature summary: - -- Triples, quads, or triples with provenance (SIDs); -- Fast RDFS+ inference and truth maintenance; -- Clustered data storage is essentially unlimited; -- Fast statement level provenance mode (SIDs). 
- -The road map [3] for the next releases includes: - -- High-volume analytic query and SPARQL 1.1 query, including aggregations; -- Simplified deployment, configuration, and administration for clusters; and -- High availability for the journal and the cluster. - -For more information, please see the following links: - -[1] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page -[2] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted -[3] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap -[4] http://www.bigdata.com/bigdata/docs/api/ -[5] http://sourceforge.net/projects/bigdata/ -[6] http://www.bigdata.com/blog -[7] http://www.systap.com/bigdata.htm -[8] https://sourceforge.net/projects/bigdata/files/bigdata/ - -About bigdata: - -Bigdata(R) is a horizontally-scaled, general purpose storage and computing fabric -for ordered data (B+Trees), designed to operate on either a single server or a -cluster of commodity hardware. Bigdata(R) uses dynamically partitioned key-range -shards in order to remove any realistic scaling limits - in principle, bigdata\xAE -may be deployed on 10s, 100s, or even thousands of machines and new capacity may -be added incrementally without requiring the full reload of all data. The bigdata\xAE -RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), -and datum level provenance. +This is a bigdata (R) release. This release is capable of loading 1B triples in +under one hour on a 15 node cluster. JDK 1.6 is required. + +Bigdata(R) is a horizontally scaled open source architecture for indexed data +with an emphasis on semantic web data architectures. Bigdata operates in both +a single machine mode (Journal) and a cluster mode (Federation). The Journal +provides fast scalable ACID indexed storage for very large data sets. The +federation provides fast scalable shard-wise parallel indexed storage using +dynamic sharding and shard-wise ACID updates. 
Both platforms support fully +concurrent readers with snapshot isolation. + +Distributed processing offers greater throughput but does not reduce query or +update latency. Choose the Journal when the anticipated scale and throughput +requirements permit. Choose the Federation when the administrative and machine +overhead associated with operating a cluster is an acceptable tradeoff to have +essentially unlimited data scaling and throughput. + +See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and +[3,5,6] for news, questions, and the latest developments. For more information +about SYSTAP, LLC and bigdata, see [7]. + +Starting with this release, we offer a WAR artifact [8] for easy installation of +the Journal mode database. For custom development and cluster installations we +recommend checking out the code from SVN using the tag for this release. The +code will build automatically under eclipse. You can also build the code using +the ant script. The cluster installer requires the use of the ant script. + +You can checkout this release from the following URL: + +https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_0_1 + +Bug fixes: + + - https://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema + names in the sparse row store). + + - https://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should + use more bits for scale-out). + + - https://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized + performance counter collection classes). + + - https://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() + must return new instance when DummyIV is used). + + - https://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits + Journal to 2B distinct RDF Values per triple/quad store instance). + + - https://sourceforge.net/apps/trac/bigdata/ticket/351 (SPO not Serializable + exception in SIDS mode (scale-out)). 
+ + - https://sourceforge.net/apps/trac/bigdata/ticket/352 (ClassCastException when + querying with binding-values that are not known to the database). + + - https://sourceforge.net/apps/trac/bigdata/ticket/353 (UnsupportedOperatorException + for some SPARQL queries). + + - https://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when + comparing with non materialized value). + +New features: + +- Single machine data storage to ~50B triples/quads (RWStore); +- Simple embedded and/or webapp deployment (NanoSparqlServer); +- 100% native SPARQL 1.0 evaluation with lots of query optimizations; + +Feature summary: + +- Triples, quads, or triples with provenance (SIDs); +- Fast RDFS+ inference and truth maintenance; +- Clustered data storage is essentially unlimited; +- Fast statement level provenance mode (SIDs). + +The road map [3] for the next releases includes: + +- High-volume analytic query and SPARQL 1.1 query, including aggregations; +- Simplified deployment, configuration, and administration for clusters; and +- High availability for the journal and the cluster. + +For more information, please see the following links: + +[1] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page +[2] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted +[3] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap +[4] http://www.bigdata.com/bigdata/docs/api/ +[5] http://sourceforge.net/projects/bigdata/ +[6] http://www.bigdata.com/blog +[7] http://www.systap.com/bigdata.htm +[8] https://sourceforge.net/projects/bigdata/files/bigdata/ + +About bigdata: + +Bigdata\xAE is a horizontally-scaled, general purpose storage and computing fabric +for ordered data (B+Trees), designed to operate on either a single server or a +cluster of commodity hardware. 
Bigdata\xAE uses dynamically partitioned key-range +shards in order to remove any realistic scaling limits - in principle, bigdata\xAE +may be deployed on 10s, 100s, or even thousands of machines and new capacity may +be added incrementally without requiring the full reload of all data. The bigdata\xAE +RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), +and datum level provenance. Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt 2011-07-25 13:25:17 UTC (rev 4965) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt 2011-07-25 16:08:58 UTC (rev 4966) @@ -1,91 +1,100 @@ -This is a bigdata (R) release. This release is capable of loading 1B triples in -under one hour on a 15 node cluster. JDK 1.6 is required. - -Bigdata(R) is a horizontally scaled open source architecture for indexed data -with an emphasis on semantic web data architectures. Bigdata operates in both -a single machine mode (Journal) and a cluster mode (Federation). The Journal -provides fast scalable ACID indexed storage for very large data sets. The -federation provides fast scalable shard-wise parallel indexed storage using -dynamic sharding and shard-wise ACID updates. Both platforms support fully -concurrent readers with snapshot isolation. - -Distributed processing offers greater throughput but does not reduce query or -update latency. Choose the Journal when the anticipated scale and throughput -requirements permit. Choose the Federation when the administrative and machine -overhead associated with operating a cluster is an acceptable tradeoff to have -essentially unlimited data scaling and throughput. - -See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and -[3,5,6] for news, questions, and the latest developments. 
For more information -about SYSTAP, LLC and bigdata, see [7]. - -Starting with this release, we offer a WAR artifact [8] for easy installation of -the Journal mode database. For custom development and cluster installations we -recommend checking out the code from SVN using the tag for this release. The -code will build automatically under eclipse. You can also build the code using -the ant script. The cluster installer requires the use of the ant script. - -You can checkout this release from the following URL: - -https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_0_1 - -Bug fixes: - - - https://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits - Journal to 2B distinct RDF Values per triple/quad store instance). - - - https://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should - use more bits for scale-out). - - - https://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema - names in the sparse row store). - - - https://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() - must return new instance when DummyIV is used). - - - https://sourceforge.net/apps/trac/bigdata/ticket/351 (SPOs not serializable - in scale-out). - - - https://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized - performance counter collection classes - -New features in 1.0.x release: - -- Single machine data storage to ~50B triples/quads (RWStore); -- Simple embedded and/or webapp deployment (NanoSparqlServer); -- 100% native SPARQL 1.0 evaluation with lots of query optimizations; - -Feature summary: - -- Triples, quads, or triples with provenance (SIDs); -- Fast RDFS+ inference and truth maintenance; -- Clustered data storage is essentially unlimited; -- Fast statement level provenance mode (SIDs). 
- -The road map [3] for the next releases includes: - -- High-volume analytic query and SPARQL 1.1 query, including aggregations; -- Simplified deployment, configuration, and administration for clusters; and -- High availability for the journal and the cluster. - -For more information, please see the following links: - -[1] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page -[2] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted -[3] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap -[4] http://www.bigdata.com/bigdata/docs/api/ -[5] http://sourceforge.net/projects/bigdata/ -[6] http://www.bigdata.com/blog -[7] http://www.systap.com/bigdata.htm -[8] https://sourceforge.net/projects/bigdata/files/bigdata/ - -About bigdata: - -Bigdata(R) is a horizontally-scaled, general purpose storage and computing fabric -for ordered data (B+Trees), designed to operate on either a single server or a -cluster of commodity hardware. Bigdata(R) uses dynamically partitioned key-range -shards in order to remove any realistic scaling limits - in principle, bigdata\xAE -may be deployed on 10s, 100s, or even thousands of machines and new capacity may -be added incrementally without requiring the full reload of all data. The bigdata\xAE -RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), -and datum level provenance. +This is a bigdata (R) release. This release is capable of loading 1B triples in +under one hour on a 15 node cluster. JDK 1.6 is required. + +Bigdata(R) is a horizontally scaled open source architecture for indexed data +with an emphasis on semantic web data architectures. Bigdata operates in both +a single machine mode (Journal) and a cluster mode (Federation). The Journal +provides fast scalable ACID indexed storage for very large data sets. The +federation provides fast scalable shard-wise parallel indexed storage using +dynamic sharding and shard-wise ACID updates. 
Both platforms support fully +concurrent readers with snapshot isolation. + +Distributed processing offers greater throughput but does not reduce query or +update latency. Choose the Journal when the anticipated scale and throughput +requirements permit. Choose the Federation when the administrative and machine +overhead associated with operating a cluster is an acceptable tradeoff to have +essentially unlimited data scaling and throughput. + +See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and +[3,5,6] for news, questions, and the latest developments. For more information +about SYSTAP, LLC and bigdata, see [7]. + +Starting with this release, we offer a WAR artifact [8] for easy installation of +the Journal mode database. For custom development and cluster installations we +recommend checking out the code from SVN using the tag for this release. The +code will build automatically under eclipse. You can also build the code using +the ant script. The cluster installer requires the use of the ant script. + +You can checkout this release from the following URL: + +https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_0_1 + +Bug fixes: + + - https://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema + names in the sparse row store). + + - https://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should + use more bits for scale-out). + + - https://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized + performance counter collection classes). + + - https://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() + must return new instance when DummyIV is used). + + - https://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits + Journal to 2B distinct RDF Values per triple/quad store instance). + + - https://sourceforge.net/apps/trac/bigdata/ticket/351 (SPO not Serializable + exception in SIDS mode (scale-out)). 
+ + - https://sourceforge.net/apps/trac/bigdata/ticket/352 (ClassCastException when + querying with binding-values that are not known to the database). + + - https://sourceforge.net/apps/trac/bigdata/ticket/353 (UnsupportedOperatorException + for some SPARQL queries). + + - https://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when + comparing with non materialized value). + +New features: + +- Single machine data storage to ~50B triples/quads (RWStore); +- Simple embedded and/or webapp deployment (NanoSparqlServer); +- 100% native SPARQL 1.0 evaluation with lots of query optimizations; + +Feature summary: + +- Triples, quads, or triples with provenance (SIDs); +- Fast RDFS+ inference and truth maintenance; +- Clustered data storage is essentially unlimited; +- Fast statement level provenance mode (SIDs). + +The road map [3] for the next releases includes: + +- High-volume analytic query and SPARQL 1.1 query, including aggregations; +- Simplified deployment, configuration, and administration for clusters; and +- High availability for the journal and the cluster. + +For more information, please see the following links: + +[1] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page +[2] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted +[3] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap +[4] http://www.bigdata.com/bigdata/docs/api/ +[5] http://sourceforge.net/projects/bigdata/ +[6] http://www.bigdata.com/blog +[7] http://www.systap.com/bigdata.htm +[8] https://sourceforge.net/projects/bigdata/files/bigdata/ + +About bigdata: + +Bigdata\xAE is a horizontally-scaled, general purpose storage and computing fabric +for ordered data (B+Trees), designed to operate on either a single server or a +cluster of commodity hardware. 
Bigdata\xAE uses dynamically partitioned key-range +shards in order to remove any realistic scaling limits - in principle, bigdata\xAE +may be deployed on 10s, 100s, or even thousands of machines and new capacity may +be added incrementally without requiring the full reload of all data. The bigdata\xAE +RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), +and datum level provenance. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-07-25 23:31:31
|
Revision: 4969 http://bigdata.svn.sourceforge.net/bigdata/?rev=4969&view=rev Author: thompsonbry Date: 2011-07-25 23:31:25 +0000 (Mon, 25 Jul 2011) Log Message: ----------- Reduced log levels for two log statements per inline comment from INFO to TRACE. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2011-07-25 19:38:10 UTC (rev 4968) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2011-07-25 23:31:25 UTC (rev 4969) @@ -649,8 +649,8 @@ try { - if(log.isInfoEnabled())//FIXME TRACE - log.info(msg.toString()); + if(log.isTraceEnabled()) + log.trace(msg.toString()); if (runState.startOp(msg)) { @@ -709,8 +709,8 @@ try { - if(log.isInfoEnabled())//FIXME TRACE - log.info(msg.toString()); + if(log.isTraceEnabled()) + log.trace(msg.toString()); // update per-operator statistics. 
final BOpStats tmp = statsMap.putIfAbsent(msg.bopId, msg.taskStats); Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2011-07-25 19:38:10 UTC (rev 4968) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2011-07-25 23:31:25 UTC (rev 4969) @@ -649,8 +649,8 @@ try { - if(log.isInfoEnabled())//FIXME TRACE - log.info(msg.toString()); + if(log.isTraceEnabled()) + log.trace(msg.toString()); if (runState.startOp(msg)) { @@ -709,8 +709,8 @@ try { - if(log.isInfoEnabled())//FIXME TRACE - log.info(msg.toString()); + if(log.isTraceEnabled()) + log.trace(msg.toString()); // update per-operator statistics. final BOpStats tmp = statsMap.putIfAbsent(msg.bopId, msg.taskStats); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-07-26 15:12:03
|
Revision: 4970 http://bigdata.svn.sourceforge.net/bigdata/?rev=4970&view=rev Author: thompsonbry Date: 2011-07-26 15:11:55 +0000 (Tue, 26 Jul 2011) Log Message: ----------- Added an ExportKB utility as described at [1] and [2] to the 1.0.0 release branch and the development branch. Imported the CreateKB utility from the 1.0.0 release branch to the development branch. [1] https://sourceforge.net/apps/trac/bigdata/ticket/350#comment:11 [2] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration Added Paths: ----------- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/ExportKB.java branches/TERMS_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/util/CreateKB.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/ExportKB.java Added: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/ExportKB.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/ExportKB.java (rev 0) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/ExportKB.java 2011-07-26 15:11:55 UTC (rev 4970) @@ -0,0 +1,594 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Jul 26, 2011 + */ + +package com.bigdata.rdf.sail; + +import info.aduna.iteration.CloseableIteration; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Enumeration; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.zip.GZIPOutputStream; + +import org.apache.log4j.Logger; +import org.openrdf.model.Resource; +import org.openrdf.model.Statement; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFHandlerException; +import org.openrdf.rio.RDFWriter; +import org.openrdf.rio.RDFWriterRegistry; +import org.openrdf.sail.SailConnection; +import org.openrdf.sail.SailException; + +import com.bigdata.Banner; +import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.Journal; +import com.bigdata.rawstore.IRawStore; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.relation.RelationSchema; +import com.bigdata.relation.locator.ILocatableResource; +import com.bigdata.sparse.ITPS; + +/** + * Utility class for exporting the configuration properties and data associated + * with one or more KBs on a {@link Journal}. + * + * @see <a + * href="https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration">Data + * Migration</a>. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class ExportKB { + + private static final Logger log = Logger.getLogger(ExportKB.class); + + /** + * The KB to be exported. 
+ */ + private final AbstractTripleStore kb; + + /** + * The namespace associated with that KB. + */ + private final String namespace; + + /** + * The directory into which the KB properties and data will be written. + */ + private final File kbdir; + + /** + * The {@link RDFFormat} which will be used when the data are exported. + */ + private final RDFFormat format; + + /** + * When <code>true</code> inferences and axioms will also be exported. + * Otherwise just the explicitly given (aka told) triples/quads will be + * exported. + */ + private final boolean includeInferred; + + /** + * + * @param kb + * The KB instance. + * @param kbdir + * The directory into which the exported properties and RDF data + * will be written. + * @param format + * The {@link RDFFormat} to use when exporting the data. + * @param includeInferred + * When <code>true</code> inferences and axioms will also be + * exported. Otherwise just the explicitly given (aka told) + * triples/quads will be exported. + */ + public ExportKB(final AbstractTripleStore kb, final File kbdir, + final RDFFormat format, final boolean includeInferred) { + + if (kb == null) + throw new IllegalArgumentException("KB not specified."); + + if (kbdir == null) + throw new IllegalArgumentException( + "Output directory not specified."); + + if (format == null) + throw new IllegalArgumentException("RDFFormat not specified."); + + if (kb.isStatementIdentifiers() && !RDFFormat.RDFXML.equals(format)) + throw new IllegalArgumentException( + "SIDs mode requires RDF/XML interchange."); + + if (kb.isQuads() && !format.supportsContexts()) + throw new IllegalArgumentException( + "RDFFormat does not support quads: " + format); + + this.kb = kb; + + this.namespace = kb.getNamespace(); + + this.kbdir = kbdir; + + this.format = format; + + this.includeInferred = includeInferred; + + } + + /** + * Munge a name index so that it is suitable for use in a filesystem. 
In + * particular, any non-word characters are converted to an underscore + * character ("_"). This gets rid of all punctuation characters and + * whitespace in the index name itself, but will not translate unicode + * characters. + * + * @param s + * The name of the scale-out index. + * + * @return A string suitable for inclusion in a filename. + */ + static private String munge(final String s) { + + return s.replaceAll("[\\W]", "_"); + + } + + /** + * Export the properties and data for the KB. + * + * @throws IOException + * @throws SailException + * @throws RDFHandlerException + */ + public void export() throws IOException, SailException, RDFHandlerException { + + System.out.println("Effective output directory: " + kbdir); + + prepare(); + + exportProperties(); + + exportData(); + + } + + public void prepare() throws IOException { + if (!kbdir.exists()) { + if (!kbdir.mkdirs()) + throw new IOException("Could not create directory: " + kbdir); + } + } + + /** + * Export the configuration properties for the kb. + * + * @throws IOException + */ + public void exportProperties() throws IOException { + prepare(); + // Prepare a comment block for the properties file. + final StringBuilder comments = new StringBuilder( + "Configuration properties.\n"); + if (kb.getIndexManager() instanceof IRawStore) { + comments.append("source=" + + ((IRawStore) kb.getIndexManager()).getFile() + "\n"); + comments.append("namespace=" + namespace + "\n"); + // The timestamp of the KB view. + comments.append("timestamp=" + kb.getTimestamp() + "\n"); + // The date and time when the KB export began. (Automatically added by Java). +// comments.append("exportDate=" + new Date() + "\n"); + // The approximate #of statements (includes axioms, inferences, and + // deleted statements). + comments.append("fastStatementCount=" + + kb.getStatementCount(false/* exact */) + "\n"); + // The #of URIs in the lexicon indices. 
+ comments.append("uriCount=" + kb.getURICount() + "\n"); + // The #of Literals in the lexicon indices. + comments.append("literalCount=" + kb.getLiteralCount() + "\n"); + // The #of blank nodes in the lexicon indices. + comments.append("bnodeCount=" + kb.getBNodeCount() + "\n"); + } + // Flatten the properties so inherited defaults will also be written + // out. + final Properties properties = flatCopy(kb.getProperties()); + // Write the properties file. + final File file = new File(kbdir, "kb.properties"); + System.out.println("Writing " + file); + final OutputStream os = new BufferedOutputStream(new FileOutputStream( + file)); + try { + properties.store(os, comments.toString()); + } finally { + os.close(); + } + } + + /** + * Exports all told statements associated with the last commit point for the + * KB. + * + * @throws IOException + * @throws SailException + * @throws RDFHandlerException + */ + public void exportData() throws IOException, SailException, + RDFHandlerException { + prepare(); + final BigdataSail sail = new BigdataSail(kb); + try { + sail.initialize(); + final SailConnection conn = sail.getReadOnlyConnection(); + try { + final CloseableIteration<? extends Statement, SailException> itr = conn + .getStatements(null/* s */, null/* p */, null/* o */, + includeInferred, new Resource[] {}/* contexts */); + try { + final File file = new File(kbdir, "data." 
+ + format.getDefaultFileExtension()+".gz"); + System.out.println("Writing " + file); + final OutputStream os = new GZIPOutputStream( + new FileOutputStream(file)); + try { + final RDFWriter writer = RDFWriterRegistry + .getInstance().get(format).getWriter(os); + writer.startRDF(); + while (itr.hasNext()) { + final Statement stmt = itr.next(); + writer.handleStatement(stmt); + } + writer.endRDF(); + } finally { + os.close(); + } + } finally { + itr.close(); + } + } finally { + conn.close(); + } + } finally { + sail.shutDown(); + } + + } + + /** + * Return a list of the namespaces for the {@link AbstractTripleStore}s + * registered against the bigdata instance. + */ + static List<String> getNamespaces(final IIndexManager indexManager) { + + // the triple store namespaces. + final List<String> namespaces = new LinkedList<String>(); + + // scan the relation schema in the global row store. + @SuppressWarnings("unchecked") + final Iterator<ITPS> itr = (Iterator<ITPS>) indexManager + .getGlobalRowStore().rangeIterator(RelationSchema.INSTANCE); + + while (itr.hasNext()) { + + // A timestamped property value set is a logical row with + // timestamped property values. + final ITPS tps = itr.next(); + + // If you want to see what is in the TPS, uncomment this. +// System.err.println(tps.toString()); + + // The namespace is the primary key of the logical row for the + // relation schema. + final String namespace = (String) tps.getPrimaryKey(); + + // Get the name of the implementation class + // (AbstractTripleStore, SPORelation, LexiconRelation, etc.) + final String className = (String) tps.get(RelationSchema.CLASS) + .getValue(); + + try { + final Class<?> cls = Class.forName(className); + if (AbstractTripleStore.class.isAssignableFrom(cls)) { + // this is a triple store (vs something else). 
+ namespaces.add(namespace); + } + } catch (ClassNotFoundException e) { + log.error(e,e); + } + + } + + return namespaces; + + } + + /** + * Load a {@link Properties} object from a file. + * + * @param file + * The property file. + * + * @return The {@link Properties}. + * + * @throws IOException + */ + static private Properties loadProperties(final File file) + throws IOException { + + final Properties p = new Properties(); + + final InputStream is = new BufferedInputStream( + new FileInputStream(file)); + + try { + + p.load(is); + + } finally { + + is.close(); + } + + return p; + + } + + static public Properties flatCopy(final Properties props) { + + final Properties tmp = new Properties(); + + tmp.putAll(flatten(props)); + + return tmp; + + } + + private static Map<String,String> flatten(final Properties properties) { + + if (properties == null) { + + throw new IllegalArgumentException(); + + } + + final Map<String,String> out = new LinkedHashMap<String, String>(); + + final Enumeration<?> e = properties.propertyNames(); + + while (e.hasMoreElements()) { + + final String property = (String) e.nextElement(); + + final String propertyValue = properties.getProperty(property); + + if (propertyValue != null) + out.put(property, propertyValue); + + } + + return out; + + } + + /** + * Export one or more KBs from a Journal. The only required argument is the + * name of the properties file for the Journal. By default all KB instances + * found on the journal will be exported into the current working directory. + * Each KB will be written into a subdirectory based on the namespace of the + * KB. + * + * @param args + * <code>[options] propertyFile namespace*</code> where + * <i>options</i> is any of: + * <dl> + * <dt>-outdir</dt> + * <dd>The output directory (default is the current working + * directory)</dd> + * <dt>-format</dt> + * <dd>The {@link RDFFormat} which will be used to export the + * data. 
If not specified then an appropriate format will be + * selected based on the KB configuration. The default for + * triples or SIDs is {@link RDFFormat#RDFXML}. The default for + * quads is {@link RDFFormat#TRIX}.</dd> + * <dt>-includeInferred</dt> + * <dd>Normally only the told triples/quads will be exported. + * This option may be given to export the axioms and inferences + * as well as the told triples/quads.</dd> + * <dt>-n</dt> + * <dd>Do nothing, but show the KBs which would be exported.</dd> + * <dt>-help</dt> + * <dd>Display the usage message and exit.</dd> + * </dl> + * where <i>propertyFile</i> is the properties file for the + * Journal.<br/> + * where <i>namespace</i> is zero or more namespaces of KBs to + * export from the Journal. If no namespace is given, then all + * KBs on the Journal will be exported. + * + * @throws Exception + */ + public static void main(final String[] args) throws Exception { + + Banner.banner(); + + /* + * Defaults for options. + */ + boolean nothing = false; + boolean includeInferred = false; + RDFFormat format = null; + File propertyFile = null; + File outdir = new File("."); + final List<String> namespaces = new LinkedList<String>(); + + // Parse options. + int i = 0; + for (; i < args.length; ) { + final String s = args[i]; + if (!s.startsWith("-")) { + // end of options. + break; + } + i++; + if(s.equals("-n")) { + nothing = true; + } else if(s.equals("-help")) { + usage(); + System.exit(0); + } else if(s.equals("-format")) { + format = RDFFormat.valueOf(args[i++]); + } else if(s.equals("-includeInferred")) { + includeInferred = true; + } else if (s.equals("-outdir")) { + outdir = new File(args[i++]); + } else { + System.err.println("Unknown option: " + s); + usage(); + System.exit(1); + } + } + + // properties file. 
+ if (i == args.length) { + usage(); + System.exit(1); + } else { + propertyFile = new File(args[i++]); + if (!propertyFile.exists()) { + System.err.println("No such file: " + propertyFile); + System.exit(1); + } + } + + // Load the properties from the file. + final Properties properties = loadProperties(propertyFile); + + /* + * Allow override of select options. + */ + { + final String[] overrides = new String[] { + // Journal options. + com.bigdata.journal.Options.FILE, + }; + for (String s : overrides) { + if (System.getProperty(s) != null) { + // Override/set from the environment. + final String v = System.getProperty(s); + System.out.println("Using: " + s + "=" + v); + properties.setProperty(s, v); + } + } + } + + // Open the journal. + final Journal indexManager = new Journal(properties); + try { + + // The last commit time on the store. + final long commitTime = indexManager.getLastCommitTime(); + + if (i == args.length) { + // Use all namespaces. + namespaces.addAll(getNamespaces(indexManager)); + } else { + // use just the given namespace(s). + for (; i < args.length;) { + final String namespace = args[i++]; + // Verify that the KB exists. + final ILocatableResource<?> kb = indexManager + .getResourceLocator().locate(namespace, commitTime); + if (kb == null) { + throw new RuntimeException("No such namespace: " + + namespace); + } + if (!(kb instanceof AbstractTripleStore)) { + throw new RuntimeException("Not a KB: " + namespace); + } + namespaces.add(namespace); + } + } + + for (String namespace : namespaces) { + + // Get KB view. + final AbstractTripleStore kb = (AbstractTripleStore) indexManager + .getResourceLocator().locate(namespace, commitTime); + + // The name of the subdirectory on which the properties and RDF + // data will be written. + final File kbdir = new File(outdir, munge(namespace)); + + // Choose an appropriate RDFFormat. + RDFFormat fmt = format; + if (fmt == null) { + // Choose an appropriate format. 
+ if (kb.isStatementIdentifiers()) { + fmt = RDFFormat.RDFXML; + } else if (kb.isQuads()) { + fmt = RDFFormat.TRIX; + } else { + fmt = RDFFormat.RDFXML; + } + } + System.out.println("Exporting " + namespace + " as " + + fmt.getName() + " on " + kbdir); + if (!nothing) { + // Export KB. + new ExportKB(kb, kbdir, fmt, includeInferred).export(); + } + } + + // Success. + System.out.println("Done"); + + } finally { + indexManager.close(); + } + + } + + private static void usage() { + + System.err.println("usage: [options] propertyFile namespace*"); + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/ExportKB.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: branches/TERMS_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/util/CreateKB.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/util/CreateKB.java (rev 0) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/util/CreateKB.java 2011-07-26 15:11:55 UTC (rev 4970) @@ -0,0 +1,221 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Jan 10, 2009 + */ + +package com.bigdata.service.jini.util; + +import java.io.IOException; +import java.util.Properties; +import java.util.concurrent.ExecutionException; + +import net.jini.config.ConfigurationException; + +import org.apache.log4j.Logger; + +import com.bigdata.config.Configuration; +import com.bigdata.journal.ITx; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.ScaleOutTripleStore; +import com.bigdata.service.jini.JiniClient; +import com.bigdata.service.jini.JiniFederation; +import com.bigdata.service.jini.master.TaskMaster; +import com.bigdata.util.NV; + +/** + * Utility to create a scale-out KB instance. You must specify an appropriate + * security policy. For example: + * <pre> + * -Djava.security.policy=policy.all + * </pre> + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class CreateKB { + + private static final Logger log = Logger.getLogger(CreateKB.class); + + /** + * The name of the component in the jini configuration file for this class. + */ + protected static final String COMPONENT = CreateKB.class.getName(); + + /** + * Configuration options understood by this utility. + */ + public interface ConfigurationOptions { + + /** + * The KB namespace. This option must be specified for the + * {@value CreateKB#COMPONENT} in the {@link Configuration}. + */ + String NAMESPACE = "namespace"; + + /** + * An {@link NV}[] providing the configuration properties for the KB. + * This option must be specified for the + * {@link com.bigdata.service.jini.JiniClient} component in the + * {@link Configuration}. The {@link NV}[] will be translated into a + * {@link Properties} object by the {@link JiniClient}. 
+ * + * @see JiniClient + */ + String PROPERTIES = "properties"; + + } + + private final JiniFederation<?> fed; + + private CreateKB(final JiniFederation<?> fed) { + + if(fed == null) + throw new IllegalArgumentException(); + + this.fed = fed; + + } + + /** + * Create the {@link AbstractTripleStore} specified by + * {@link ConfigurationOptions#NAMESPACE} using the <code>properties</code> + * associated with the {@link TaskMaster.JobState#component}. + * + * @return <code>true</code> if a new KB instance was created. + * <code>false</code> if the named KB instance already exists. + */ + protected boolean createTripleStore() throws ConfigurationException { + + /* + * Pick up properties configured for the client as defaults. + * + * You must specify those properties using NV[] for this component. + */ + final Properties properties = fed.getClient().getProperties( + COMPONENT); + + final String namespace = (String) fed + .getClient() + .getConfiguration() + .getEntry(COMPONENT, ConfigurationOptions.NAMESPACE, + String.class); + + System.out.println("KB namespace=" + namespace); + + // Locate the resource declaration (aka "open"). This tells us if it + // exists already. + AbstractTripleStore tripleStore = (AbstractTripleStore) fed + .getResourceLocator().locate(namespace, ITx.UNISOLATED); + + if (tripleStore != null) { + + System.out.println("exists: " + namespace); + + // Triple store already exists with that namespace. + return false; // pre-existing. + + } + + /* + * Create the KB instance. + */ + + if (log.isInfoEnabled()) { + log.info("Creating KB instance: namespace="+namespace); + log.info("Properties=" + properties.toString()); + } + + tripleStore = new ScaleOutTripleStore(fed, namespace, ITx.UNISOLATED, + properties); + + // create the triple store. + tripleStore.create(); + + System.out.println("Created tripleStore: " + namespace); + + // show #of statements in the newly create triple store (e.g., any axioms). 
+ System.out.println("axiomCount=" + tripleStore.getStatementCount()); + + // New KB instance was created. + return true; + + } + + /** + * Creates a KB instance. + * <p> + * Configuration options use {@link #COMPONENT} as their namespace. The + * following options are defined: + * <dl> + * + * <dt>{@value ConfigurationOptions#NAMESPACE}</dt> + * <dd>The namespace of the KB instance.</dd> + * + * <dt>{@value ConfigurationOptions#PROPERTIES}</dt> + * <dd>The properties used to create the KB instance expressed as an + * {@link NV}[].</dd> + * + * </dl> + * + * @param args + * Configuration file and optional overrides. + * + * @see ConfigurationOptions + * + * @throws ConfigurationException + * @throws InterruptedException + * @throws ExecutionException + */ + public static void main(final String[] args) throws InterruptedException, + ConfigurationException, IOException, ExecutionException { + + final JiniFederation<?> fed = JiniClient.newInstance(args).connect(); + + /* + * Install a shutdown hook (normal kill will trigger this hook). + */ + Runtime.getRuntime().addShutdownHook(new Thread() { + + public void run() { + + fed.shutdownNow(); + + } + + }); + + try { + + new CreateKB(fed).createTripleStore(); + + } finally { + + fed.shutdown(); + + } + + } + +} Property changes on: branches/TERMS_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/util/CreateKB.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/ExportKB.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/ExportKB.java (rev 0) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/ExportKB.java 2011-07-26 15:11:55 UTC (rev 4970) @@ -0,0 +1,594 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. 
+ +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Jul 26, 2011 + */ + +package com.bigdata.rdf.sail; + +import info.aduna.iteration.CloseableIteration; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Enumeration; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.zip.GZIPOutputStream; + +import org.apache.log4j.Logger; +import org.openrdf.model.Resource; +import org.openrdf.model.Statement; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFHandlerException; +import org.openrdf.rio.RDFWriter; +import org.openrdf.rio.RDFWriterRegistry; +import org.openrdf.sail.SailConnection; +import org.openrdf.sail.SailException; + +import com.bigdata.Banner; +import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.Journal; +import com.bigdata.rawstore.IRawStore; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.relation.RelationSchema; +import com.bigdata.relation.locator.ILocatableResource; +import 
com.bigdata.sparse.ITPS; + +/** + * Utility class for exporting the configuration properties and data associated + * with one or more KBs on a {@link Journal}. + * + * @see <a + * href="https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration">Data + * Migration</a>. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class ExportKB { + + private static final Logger log = Logger.getLogger(ExportKB.class); + + /** + * The KB to be exported. + */ + private final AbstractTripleStore kb; + + /** + * The namespace associated with that KB. + */ + private final String namespace; + + /** + * The directory into which the KB properties and data will be written. + */ + private final File kbdir; + + /** + * The {@link RDFFormat} which will be used when the data are exported. + */ + private final RDFFormat format; + + /** + * When <code>true</code> inferences and axioms will also be exported. + * Otherwise just the explicitly given (aka told) triples/quads will be + * exported. + */ + private final boolean includeInferred; + + /** + * + * @param kb + * The KB instance. + * @param kbdir + * The directory into which the exported properties and RDF data + * will be written. + * @param format + * The {@link RDFFormat} to use when exporting the data. + * @param includeInferred + * When <code>true</code> inferences and axioms will also be + * exported. Otherwise just the explicitly given (aka told) + * triples/quads will be exported. 
+ */ + public ExportKB(final AbstractTripleStore kb, final File kbdir, + final RDFFormat format, final boolean includeInferred) { + + if (kb == null) + throw new IllegalArgumentException("KB not specified."); + + if (kbdir == null) + throw new IllegalArgumentException( + "Output directory not specified."); + + if (format == null) + throw new IllegalArgumentException("RDFFormat not specified."); + + if (kb.isStatementIdentifiers() && !RDFFormat.RDFXML.equals(format)) + throw new IllegalArgumentException( + "SIDs mode requires RDF/XML interchange."); + + if (kb.isQuads() && !format.supportsContexts()) + throw new IllegalArgumentException( + "RDFFormat does not support quads: " + format); + + this.kb = kb; + + this.namespace = kb.getNamespace(); + + this.kbdir = kbdir; + + this.format = format; + + this.includeInferred = includeInferred; + + } + + /** + * Munge a name index so that it is suitable for use in a filesystem. In + * particular, any non-word characters are converted to an underscore + * character ("_"). This gets rid of all punctuation characters and + * whitespace in the index name itself, but will not translate unicode + * characters. + * + * @param s + * The name of the scale-out index. + * + * @return A string suitable for inclusion in a filename. + */ + static private String munge(final String s) { + + return s.replaceAll("[\\W]", "_"); + + } + + /** + * Export the properties and data for the KB. + * + * @throws IOException + * @throws SailException + * @throws RDFHandlerException + */ + public void export() throws IOException, SailException, RDFHandlerException { + + System.out.println("Effective output directory: " + kbdir); + + prepare(); + + exportProperties(); + + exportData(); + + } + + public void prepare() throws IOException { + if (!kbdir.exists()) { + if (!kbdir.mkdirs()) + throw new IOException("Could not create directory: " + kbdir); + } + } + + /** + * Export the configuration properties for the kb. 
+ * + * @throws IOException + */ + public void exportProperties() throws IOException { + prepare(); + // Prepare a comment block for the properties file. + final StringBuilder comments = new StringBuilder( + "Configuration properties.\n"); + if (kb.getIndexManager() instanceof IRawStore) { + comments.append("source=" + + ((IRawStore) kb.getIndexManager()).getFile() + "\n"); + comments.append("namespace=" + namespace + "\n"); + // The timestamp of the KB view. + comments.append("timestamp=" + kb.getTimestamp() + "\n"); + // The date and time when the KB export began. (Automatically added by Java). +// comments.append("exportDate=" + new Date() + "\n"); + // The approximate #of statements (includes axioms, inferences, and + // deleted statements). + comments.append("fastStatementCount=" + + kb.getStatementCount(false/* exact */) + "\n"); + // The #of URIs in the lexicon indices. + comments.append("uriCount=" + kb.getURICount() + "\n"); + // The #of Literals in the lexicon indices. + comments.append("literalCount=" + kb.getLiteralCount() + "\n"); + // The #of blank nodes in the lexicon indices. + comments.append("bnodeCount=" + kb.getBNodeCount() + "\n"); + } + // Flatten the properties so inherited defaults will also be written + // out. + final Properties properties = flatCopy(kb.getProperties()); + // Write the properties file. + final File file = new File(kbdir, "kb.properties"); + System.out.println("Writing " + file); + final OutputStream os = new BufferedOutputStream(new FileOutputStream( + file)); + try { + properties.store(os, comments.toString()); + } finally { + os.close(); + } + } + + /** + * Exports all told statements associated with the last commit point for the + * KB. 
+ * + * @throws IOException + * @throws SailException + * @throws RDFHandlerException + */ + public void exportData() throws IOException, SailException, + RDFHandlerException { + prepare(); + final BigdataSail sail = new BigdataSail(kb); + try { + sail.initialize(); + final SailConnection conn = sail.getReadOnlyConnection(); + try { + final CloseableIteration<? extends Statement, SailException> itr = conn + .getStatements(null/* s */, null/* p */, null/* o */, + includeInferred, new Resource[] {}/* contexts */); + try { + final File file = new File(kbdir, "data." + + format.getDefaultFileExtension()+".gz"); + System.out.println("Writing " + file); + final OutputStream os = new GZIPOutputStream( + new FileOutputStream(file)); + try { + final RDFWriter writer = RDFWriterRegistry + .getInstance().get(format).getWriter(os); + writer.startRDF(); + while (itr.hasNext()) { + final Statement stmt = itr.next(); + writer.handleStatement(stmt); + } + writer.endRDF(); + } finally { + os.close(); + } + } finally { + itr.close(); + } + } finally { + conn.close(); + } + } finally { + sail.shutDown(); + } + + } + + /** + * Return a list of the namespaces for the {@link AbstractTripleStore}s + * registered against the bigdata instance. + */ + static List<String> getNamespaces(final IIndexManager indexManager) { + + // the triple store namespaces. + final List<String> namespaces = new LinkedList<String>(); + + // scan the relation schema in the global row store. + @SuppressWarnings("unchecked") + final Iterator<ITPS> itr = (Iterator<ITPS>) indexManager + .getGlobalRowStore().rangeIterator(RelationSchema.INSTANCE); + + while (itr.hasNext()) { + + // A timestamped property value set is a logical row with + // timestamped property values. + final ITPS tps = itr.next(); + + // If you want to see what is in the TPS, uncomment this. +// System.err.println(tps.toString()); + + // The namespace is the primary key of the logical row for the + // relation schema. 
+ final String namespace = (String) tps.getPrimaryKey(); + + // Get the name of the implementation class + // (AbstractTripleStore, SPORelation, LexiconRelation, etc.) + final String className = (String) tps.get(RelationSchema.CLASS) + .getValue(); + + try { + final Class<?> cls = Class.forName(className); + if (AbstractTripleStore.class.isAssignableFrom(cls)) { + // this is a triple store (vs something else). + namespaces.add(namespace); + } + } catch (ClassNotFoundException e) { + log.error(e,e); + } + + } + + return namespaces; + + } + + /** + * Load a {@link Properties} object from a file. + * + * @param file + * The property file. + * + * @return The {@link Properties}. + * + * @throws IOException + */ + static private Properties loadProperties(final File file) + throws IOException { + + final Properties p = new Properties(); + + final InputStream is = new BufferedInputStream( + new FileInputStream(file)); + + try { + + p.load(is); + + } finally { + + is.close(); + } + + return p; + + } + + static public Properties flatCopy(final Properties props) { + + final Properties tmp = new Properties(); + + tmp.putAll(flatten(props)); + + return tmp; + + } + + private static Map<String,String> flatten(final Properties properties) { + + if (properties == null) { + + throw new IllegalArgumentException(); + + } + + final Map<String,String> out = new LinkedHashMap<String, String>(); + + final Enumeration<?> e = properties.propertyNames(); + + while (e.hasMoreElements()) { + + final String property = (String) e.nextElement(); + + final String propertyValue = properties.getProperty(property); + + if (propertyValue != null) + out.put(property, propertyValue); + + } + + return out; + + } + + /** + * Export one or more KBs from a Journal. The only required argument is the + * name of the properties file for the Journal. By default all KB instances + * found on the journal will be exported into the current working directory. 
+ * Each KB will be written into a subdirectory based on the namespace of the + * KB. + * + * @param args + * <code>[options] propertyFile namespace*</code> where + * <i>options</i> is any of: + * <dl> + * <dt>-outdir</dt> + * <dd>The output directory (default is the current working + * directory)</dd> + * <dt>-format</dt> + * <dd>The {@link RDFFormat} which will be used to export the + * data. If not specified then an appropriate format will be + * selected based on the KB configuration. The default for + * triples or SIDs is {@link RDFFormat#RDFXML}. The default for + * quads is {@link RDFFormat#TRIX}.</dd> + * <dt>-includeInferred</dt> + * <dd>Normally only the told triples/quads will be exported. + * This option may be given to export the axioms and inferences + * as well as the told triples/quads.</dd> + * <dt>-n</dt> + * <dd>Do nothing, but show the KBs which would be exported.</dd> + * <dt>-help</dt> + * <dd>Display the usage message and exit.</dd> + * </dl> + * where <i>propertyFile</i> is the properties file for the + * Journal.<br/> + * where <i>namespace</i> is zero or more namespaces of KBs to + * export from the Journal. If no namespace is given, then all + * KBs on the Journal will be exported. + * + * @throws Exception + */ + public static void main(final String[] args) throws Exception { + + Banner.banner(); + + /* + * Defaults for options. + */ + boolean nothing = false; + boolean includeInferred = false; + RDFFormat format = null; + File propertyFile = null; + File outdir = new File("."); + final List<String> namespaces = new LinkedList<String>(); + + // Parse options. + int i = 0; + for (; i < args.length; ) { + final String s = args[i]; + if (!s.startsWith("-")) { + // end of options. 
+ break; + } + i++; + if(s.equals("-n")) { + nothing = true; + } else if(s.equals("-help")) { + usage(); + System.exit(0); + } else if(s.equals("-format")) { + format = RDFFormat.valueOf(args[i++]); + } else if(s.equals("-includeInferred")) { + includeInferred = true; + } else if (s.equals("-outdir")) { + outdir = new File(args[i++]); + } else { + System.err.println("Unknown option: " + s); + usage(); + System.exit(1); + } + } + + // properties file. + if (i == args.length) { + usage(); + System.exit(1); + } else { + propertyFile = new File(args[i++]); + if (!propertyFile.exists()) { + System.err.println("No such file: " + propertyFile); + System.exit(1); + } + } + + // Load the properties from the file. + final Properties properties = loadProperties(propertyFile); + + /* + * Allow override of select options. + */ + { + final String[] overrides = new String[] { + // Journal options. + com.bigdata.journal.Options.FILE, + }; + for (String s : overrides) { + if (System.getProperty(s) != null) { + // Override/set from the environment. + final String v = System.getProperty(s); + System.out.println("Using: " + s + "=" + v); + properties.setProperty(s, v); + } + } + } + + // Open the journal. + final Journal indexManager = new Journal(properties); + try { + + // The last commit time on the store. + final long commitTime = indexManager.getLastCommitTime(); + + if (i == args.length) { + // Use all namespaces. + namespaces.addAll(getNamespaces(indexManager)); + } else { + // use just the given namespace(s). + for (; i < args.length;) { + final String namespace = args[i++]; + // Verify that the KB exists. 
+ final ILocatableResource<?> kb = indexManager + .getResourceLocator().locate(namespace, commitTime); + if (kb == null) { + throw new RuntimeException("No such namespace: " + + namespace); + } + if (!(kb instanceof AbstractTripleStore)) { + throw new RuntimeException("Not a KB: " + namespace); + } + namespaces.add(namespace); + } + } + + for (String namespace : namespaces) { + + // Get KB view. + final AbstractTripleStore kb = (AbstractTripleStore) indexManager + .getResourceLocator().locate(namespace, commitTime); + + // The name of the subdirectory on which the properties and RDF + // data will be written. + final File kbdir = new File(outdir, munge(namespace)); + + // Choose an appropriate RDFFormat. + RDFFormat fmt = format; + if (fmt == null) { + // Choose an appropriate format. + if (kb.isStatementIdentifiers()) { + fmt = RDFFormat.RDFXML; + } else if (kb.isQuads()) { + fmt = RDFFormat.TRIX; + } else { + fmt = RDFFormat.RDFXML; + } + } + System.out.println("Exporting " + namespace + " as " + + fmt.getName() + " on " + kbdir); + if (!nothing) { + // Export KB. + new ExportKB(kb, kbdir, fmt, includeInferred).export(); + } + } + + // Success. + System.out.println("Done"); + + } finally { + indexManager.close(); + } + + } + + private static void usage() { + + System.err.println("usage: [options] propertyFile namespace*"); + + } + +} Property changes on: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/ExportKB.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-08-03 11:02:18
|
Revision: 5011 http://bigdata.svn.sourceforge.net/bigdata/?rev=5011&view=rev Author: thompsonbry Date: 2011-08-03 11:02:12 +0000 (Wed, 03 Aug 2011) Log Message: ----------- Modified FixedAllocator to invoke checkBits() using conditional logging @ DEBUG rather than assert since we frequently run with asserts enabled and checkBits() might add some overhead. Also removed 2 computed but unused values and made a 2 method arguments final. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2011-08-02 18:46:30 UTC (rev 5010) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2011-08-03 11:02:12 UTC (rev 5011) @@ -151,7 +151,7 @@ */ private ArrayList m_freeList; - public void setFreeList(ArrayList list) { + public void setFreeList(final ArrayList list) { m_freeList = list; if (!m_pendingContextCommit && hasFree()) { @@ -177,7 +177,8 @@ throw new IllegalStateException("Already pending commit"); } - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); if (context == null && m_context != null) { // restore commit bits in AllocBlocks @@ -206,8 +207,9 @@ m_contextThread = null; } - assert checkBits(); - + if (log.isDebugEnabled()) + checkBits(); + } /** @@ -326,7 +328,8 @@ } - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); return buf; } catch (IOException e) { @@ -338,7 +341,7 @@ int freeBits = 0; final Iterator<AllocBlock> iter = m_allocBlocks.iterator(); - final int blockSize = m_bitSize * 32 * m_size; +// final int blockSize = m_bitSize * 32 * m_size; while (iter.hasNext()) { final AllocBlock block = 
iter.next(); for (int i = 0; i < m_bitSize; i++) { @@ -353,7 +356,7 @@ int freeBits = 0; final Iterator<AllocBlock> iter = m_allocBlocks.iterator(); - final int blockSize = m_bitSize * 32 * m_size; +// final int blockSize = m_bitSize * 32 * m_size; while (iter.hasNext()) { final AllocBlock block = iter.next(); for (int i = 0; i < m_bitSize; i++) { @@ -641,7 +644,8 @@ if (tmp && !m_sessionActive) throw new AssertionError(); try { - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); if (((AllocBlock) m_allocBlocks.get(block)) .freeBit(offset % nbits, m_sessionActive && !overideSession)) { // bit adjust @@ -678,7 +682,8 @@ throw new IllegalArgumentException("IAE with address: " + addr + ", size: " + size + ", context: " + (m_context == null ? -1 : m_context.hashCode()), iae); } - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); return true; } else if (addr >= m_startAddr && addr < m_endAddr) { @@ -689,14 +694,16 @@ if (block.free(addr, m_size)) { m_freeTransients++; - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); return true; } } } - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); return false; } @@ -804,7 +811,8 @@ return 0; } } finally { - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); } } @@ -816,7 +824,7 @@ return m_freeBits > 0; } - public void addAddresses(ArrayList addrs) { + public void addAddresses(final ArrayList addrs) { final Iterator blocks = m_allocBlocks.iterator(); Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2011-08-02 18:46:30 UTC (rev 5010) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2011-08-03 11:02:12 UTC (rev 5011) @@ -151,7 +151,7 @@ */ private ArrayList m_freeList; - public void setFreeList(ArrayList list) { + 
public void setFreeList(final ArrayList list) { m_freeList = list; if (!m_pendingContextCommit && hasFree()) { @@ -177,7 +177,8 @@ throw new IllegalStateException("Already pending commit"); } - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); if (context == null && m_context != null) { // restore commit bits in AllocBlocks @@ -206,8 +207,9 @@ m_contextThread = null; } - assert checkBits(); - + if (log.isDebugEnabled()) + checkBits(); + } /** @@ -326,7 +328,8 @@ } - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); return buf; } catch (IOException e) { @@ -338,7 +341,7 @@ int freeBits = 0; final Iterator<AllocBlock> iter = m_allocBlocks.iterator(); - final int blockSize = m_bitSize * 32 * m_size; +// final int blockSize = m_bitSize * 32 * m_size; while (iter.hasNext()) { final AllocBlock block = iter.next(); for (int i = 0; i < m_bitSize; i++) { @@ -353,7 +356,7 @@ int freeBits = 0; final Iterator<AllocBlock> iter = m_allocBlocks.iterator(); - final int blockSize = m_bitSize * 32 * m_size; +// final int blockSize = m_bitSize * 32 * m_size; while (iter.hasNext()) { final AllocBlock block = iter.next(); for (int i = 0; i < m_bitSize; i++) { @@ -641,7 +644,8 @@ if (tmp && !m_sessionActive) throw new AssertionError(); try { - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); if (((AllocBlock) m_allocBlocks.get(block)) .freeBit(offset % nbits, m_sessionActive && !overideSession)) { // bit adjust @@ -678,7 +682,8 @@ throw new IllegalArgumentException("IAE with address: " + addr + ", size: " + size + ", context: " + (m_context == null ? 
-1 : m_context.hashCode()), iae); } - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); return true; } else if (addr >= m_startAddr && addr < m_endAddr) { @@ -689,14 +694,16 @@ if (block.free(addr, m_size)) { m_freeTransients++; - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); return true; } } } - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); return false; } @@ -804,7 +811,8 @@ return 0; } } finally { - assert checkBits(); + if (log.isDebugEnabled()) + checkBits(); } } @@ -816,7 +824,7 @@ return m_freeBits > 0; } - public void addAddresses(ArrayList addrs) { + public void addAddresses(final ArrayList addrs) { final Iterator blocks = m_allocBlocks.iterator(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-08-03 11:29:40
|
Revision: 5012 http://bigdata.svn.sourceforge.net/bigdata/?rev=5012&view=rev Author: thompsonbry Date: 2011-08-03 11:29:33 +0000 (Wed, 03 Aug 2011) Log Message: ----------- Workaround for [1]. There are three places where we use Logger.setLevel(). Of these, the most critical is in Banner where it is used to raise the log level for com.bigdata to WARN if it has not been explicitly set. This is vital because logging at the default log level causes a tremendous performance hit due to conditional logging in the B+Tree and other critical packages. [1] documents an issue where the SLF4J bridge does not implement Logger.setLevel(). As a workaround, Banner.banner() will log the following message @ ERROR if the com.bigdata log level has not been explicitly configured: Unable to raise the default log level to WARN. Logging is NOT properly configured. Severe performance penalty will result. The other two locations where Logger.setLevel() is used are not critical as they only provide for additional debugging information rather than providing a precondition for satisfactory performance. They are DumpIndexSegment and CounterSetHTTPDServer. In both cases, the (wrapped) NoSuchMethodException is trapped and an error is logged out. [1] https://sourceforge.net/apps/trac/bigdata/ticket/362 (SLF4J - LOG4J bridge does not implement setLevel). 
Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/counters/httpd/CounterSetHTTPDServer.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/Banner.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/counters/httpd/CounterSetHTTPDServer.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java 2011-08-03 11:02:12 UTC (rev 5011) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java 2011-08-03 11:29:33 UTC (rev 5012) @@ -37,6 +37,7 @@ import org.apache.system.SystemUtil; import com.bigdata.counters.AbstractStatisticsCollector; +import com.bigdata.util.InnerCause; /** * Class has a static method which writes a copyright banner on stdout once per @@ -90,14 +91,43 @@ /* * Since there is no default for com.bigdata, default to WARN. */ + try { + + log.setLevel(Level.WARN); + + if (!quiet) + log.warn("Defaulting log level to WARN: " + + log.getName()); + + } catch (Throwable t) { - log.setLevel(Level.WARN); + /* + * Note: The SLF4J bridge can cause a NoSuchMethodException + * to be thrown out of Logger.setLevel(). We trap this + * exception and log a message @ ERROR. It is critical that + * bigdata logging is properly configured as logging at INFO + * for com.bigdata will cause a tremendous loss of + * performance. 
+ * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/362 + */ + if (InnerCause.isInnerCause(t, NoSuchMethodException.class)) { - if (!quiet) - log.warn("Defaulting log level to WARN: " + log.getName()); + log.error("Unable to raise the default log level to WARN." + + " Logging is NOT properly configured." + + " Severe performance penalty will result."); - } + } else { + + // Something else that we are not expecting. + throw new RuntimeException(t); + + } + + } + } // if(log.getLevel() == null) + /* * Note: I have modified this to test for disabled registration and * to use reflection in order to decouple the JMX dependency for Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java 2011-08-03 11:02:12 UTC (rev 5011) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java 2011-08-03 11:29:33 UTC (rev 5012) @@ -38,6 +38,7 @@ import com.bigdata.btree.IndexSegment.ImmutableNodeFactory.ImmutableLeaf; import com.bigdata.io.DirectBufferPool; import com.bigdata.rawstore.IRawStore; +import com.bigdata.util.InnerCause; /** * Utility to examine the context of an {@link IndexSegmentStore}. @@ -94,9 +95,26 @@ final Level level = Level.toLevel(args[++i]); System.out.println("Setting log level: "+level); - + // turn up the dumpLog level so that we can see the output. - AbstractBTree.dumpLog.setLevel(level); + try { + AbstractBTree.dumpLog.setLevel(level); + } catch (Throwable t) { + /* + * Note: The SLF4J logging bridge can cause a + * NoSuchMethodException to be thrown here. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/362 + */ + if (InnerCause.isInnerCause(t, + NoSuchMethodException.class)) { + log.error("Could not set log level : " + + AbstractBTree.dumpLog.getName()); + } else { + // Some other problem. 
+ throw new RuntimeException(t); + } + } } else { Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/counters/httpd/CounterSetHTTPDServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/counters/httpd/CounterSetHTTPDServer.java 2011-08-03 11:02:12 UTC (rev 5011) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/counters/httpd/CounterSetHTTPDServer.java 2011-08-03 11:29:33 UTC (rev 5012) @@ -37,6 +37,7 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; +import com.bigdata.btree.AbstractBTree; import com.bigdata.counters.CounterSet; import com.bigdata.counters.ICounterSetAccess; import com.bigdata.counters.PeriodEnum; @@ -44,6 +45,7 @@ import com.bigdata.counters.render.XHTMLRenderer; import com.bigdata.service.Event; import com.bigdata.service.IService; +import com.bigdata.util.InnerCause; import com.bigdata.util.httpd.AbstractHTTPD; import com.bigdata.util.httpd.NanoHTTPD; @@ -144,15 +146,34 @@ System.out.println("Setting server and service log levels: "+level); - // set logging level on the server. - CounterSetHTTPDServer.log.setLevel(level); - - // set logging level for the view. - Logger.getLogger(XHTMLRenderer.class).setLevel(level); + try { - // set logging level on the service. - Logger.getLogger(NanoHTTPD.class).setLevel(level); - + // set logging level on the server. + CounterSetHTTPDServer.log.setLevel(level); + + // set logging level for the view. + Logger.getLogger(XHTMLRenderer.class).setLevel(level); + + // set logging level on the service. + Logger.getLogger(NanoHTTPD.class).setLevel(level); + + } catch (Throwable t) { + /* + * Note: The SLF4J logging bridge can cause a + * NoSuchMethodException to be thrown here. 
+ * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/362 + */ + if (InnerCause.isInnerCause(t, + NoSuchMethodException.class)) { + log.error("Could not set log level : " + + AbstractBTree.dumpLog.getName()); + } else { + // Some other problem. + throw new RuntimeException(t); + } + } + } else if (arg.equals("-events")) { QueryUtil.readEvents(service, new File(args[++i])); Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/Banner.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/Banner.java 2011-08-03 11:02:12 UTC (rev 5011) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/Banner.java 2011-08-03 11:29:33 UTC (rev 5012) @@ -37,6 +37,7 @@ import org.apache.system.SystemUtil; import com.bigdata.counters.AbstractStatisticsCollector; +import com.bigdata.util.InnerCause; /** * Class has a static method which writes a copyright banner on stdout once per @@ -90,14 +91,43 @@ /* * Since there is no default for com.bigdata, default to WARN. */ + try { + + log.setLevel(Level.WARN); + + if (!quiet) + log.warn("Defaulting log level to WARN: " + + log.getName()); + + } catch (Throwable t) { - log.setLevel(Level.WARN); + /* + * Note: The SLF4J bridge can cause a NoSuchMethodException + * to be thrown out of Logger.setLevel(). We trap this + * exception and log a message @ ERROR. It is critical that + * bigdata logging is properly configured as logging at INFO + * for com.bigdata will cause a tremendous loss of + * performance. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/362 + */ + if (InnerCause.isInnerCause(t, NoSuchMethodException.class)) { - if (!quiet) - log.warn("Defaulting log level to WARN: " + log.getName()); + log.error("Unable to raise the default log level to WARN." + + " Logging is NOT properly configured." + + " Severe performance penalty will result."); - } + } else { + + // Something else that we are not expecting. 
+ throw new RuntimeException(t); + + } + + } + } // if(log.getLevel() == null) + /* * Note: I have modified this to test for disabled registration and * to use reflection in order to decouple the JMX dependency for Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java 2011-08-03 11:02:12 UTC (rev 5011) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java 2011-08-03 11:29:33 UTC (rev 5012) @@ -38,6 +38,7 @@ import com.bigdata.btree.IndexSegment.ImmutableNodeFactory.ImmutableLeaf; import com.bigdata.io.DirectBufferPool; import com.bigdata.rawstore.IRawStore; +import com.bigdata.util.InnerCause; /** * Utility to examine the context of an {@link IndexSegmentStore}. @@ -96,7 +97,24 @@ System.out.println("Setting log level: "+level); // turn up the dumpLog level so that we can see the output. - AbstractBTree.dumpLog.setLevel(level); + try { + AbstractBTree.dumpLog.setLevel(level); + } catch (Throwable t) { + /* + * Note: The SLF4J logging bridge can cause a + * NoSuchMethodException to be thrown here. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/362 + */ + if (InnerCause.isInnerCause(t, + NoSuchMethodException.class)) { + log.error("Could not set log level : " + + AbstractBTree.dumpLog.getName()); + } else { + // Some other problem. 
+ throw new RuntimeException(t); + } + } } else { Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/counters/httpd/CounterSetHTTPDServer.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/counters/httpd/CounterSetHTTPDServer.java 2011-08-03 11:02:12 UTC (rev 5011) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/counters/httpd/CounterSetHTTPDServer.java 2011-08-03 11:29:33 UTC (rev 5012) @@ -37,6 +37,7 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; +import com.bigdata.btree.AbstractBTree; import com.bigdata.counters.CounterSet; import com.bigdata.counters.ICounterSetAccess; import com.bigdata.counters.PeriodEnum; @@ -44,6 +45,7 @@ import com.bigdata.counters.render.XHTMLRenderer; import com.bigdata.service.Event; import com.bigdata.service.IService; +import com.bigdata.util.InnerCause; import com.bigdata.util.httpd.AbstractHTTPD; import com.bigdata.util.httpd.NanoHTTPD; @@ -144,14 +146,33 @@ System.out.println("Setting server and service log levels: "+level); - // set logging level on the server. - CounterSetHTTPDServer.log.setLevel(level); - - // set logging level for the view. - Logger.getLogger(XHTMLRenderer.class).setLevel(level); + try { - // set logging level on the service. - Logger.getLogger(NanoHTTPD.class).setLevel(level); + // set logging level on the server. + CounterSetHTTPDServer.log.setLevel(level); + + // set logging level for the view. + Logger.getLogger(XHTMLRenderer.class).setLevel(level); + + // set logging level on the service. + Logger.getLogger(NanoHTTPD.class).setLevel(level); + + } catch (Throwable t) { + /* + * Note: The SLF4J logging bridge can cause a + * NoSuchMethodException to be thrown here. 
+ * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/362 + */ + if (InnerCause.isInnerCause(t, + NoSuchMethodException.class)) { + log.error("Could not set log level : " + + AbstractBTree.dumpLog.getName()); + } else { + // Some other problem. + throw new RuntimeException(t); + } + } } else if (arg.equals("-events")) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-08-03 12:42:06
|
Revision: 5013 http://bigdata.svn.sourceforge.net/bigdata/?rev=5013&view=rev Author: thompsonbry Date: 2011-08-03 12:41:59 +0000 (Wed, 03 Aug 2011) Log Message: ----------- Per [1], the code path which replaces the graph variable with a constant corresponding to the sole known binding has been effectively disabled by comparing DataSetSummary.nknown with -1 rather than with 1. We currently lack a means to convey a binding for the graph variable without rewriting the predicate such that the graph variable is effectively removed from the query. However, I think that we might have a solution for that when we get into the RTO integration. In the meanwhile, the comparison with -1 ensures that we continue to produce the correct solutions. Enabling this code path in the future might provide a slight performance gain. I have added a unit test to TestNamedGraphs to protect against regression for this ticket. The test fails if the code path is enabled and passes when the code path is disabled. The changes have been committed to both the 1.0.0 maintenance branch and to the development branch. 
[1] https://sourceforge.net/apps/trac/bigdata/ticket/359 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNamedGraphs.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNamedGraphs.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java 2011-08-03 11:29:33 UTC (rev 5012) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java 2011-08-03 12:41:59 UTC (rev 5013) @@ -1252,10 +1252,23 @@ } - if (summary.nknown == 1) { + if (summary.nknown == -1) { +// if (summary.nknown == 1) { /* * The dataset contains exactly one graph. Bind C. + * + * FIXME This code path has been effectively disabled per the ticket + * identified immediately below by comparing nknown with -1 rather + * than with 1. We currently lack a means to convey a binding for + * the graph variable without rewriting the predicate such that the + * graph variable is effectively removed from the query. However, I + * think that we might have a solution for that when we get into the + * RTO integration. In the meanwhile, the comparison with -1 ensures + * that we continue to produce the correct solutions. Enabling this + * code path in the future might provide a slight performance gain. 
+ * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/359 */ pred = pred.asBound((IVariable<?>) pred.get(3), Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNamedGraphs.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNamedGraphs.java 2011-08-03 11:29:33 UTC (rev 5012) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNamedGraphs.java 2011-08-03 12:41:59 UTC (rev 5013) @@ -48,6 +48,8 @@ import org.openrdf.query.impl.BindingImpl; import org.openrdf.repository.RepositoryException; import org.openrdf.sail.SailException; + +import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.store.BD; /** @@ -60,7 +62,7 @@ */ public class TestNamedGraphs extends QuadsTestCase { - protected static final Logger log = Logger.getLogger(TestNamedGraphs.class); + private static final Logger log = Logger.getLogger(TestNamedGraphs.class); /** * @@ -78,44 +80,43 @@ /** * The foaf: namespace. */ - final String FOAF = "http://xmlns.com/foaf/0.1/"; + private static final String FOAF = "http://xmlns.com/foaf/0.1/"; /** * foaf:name */ - final URI FOAF_NAME = new URIImpl(FOAF+"name"); + private static final URI FOAF_NAME = new URIImpl(FOAF+"name"); /** * foaf:mbox */ - final URI FOAF_MBOX = new URIImpl(FOAF+"mbox"); + private static final URI FOAF_MBOX = new URIImpl(FOAF+"mbox"); /** * foaf:nick */ - final URI FOAF_NICK = new URIImpl(FOAF+"nick"); + private static final URI FOAF_NICK = new URIImpl(FOAF+"nick"); /** * foaf:PersonalProfileDocument */ - final URI FOAF_PPD = new URIImpl(FOAF+"PersonalProfileDocument"); + private static final URI FOAF_PPD = new URIImpl(FOAF+"PersonalProfileDocument"); /** * foaf:knows */ - final URI FOAF_KNOWS = new URIImpl(FOAF+"knows"); + private static final URI FOAF_KNOWS = new URIImpl(FOAF+"knows"); /** * The dc: namespace. 
*/ - final String DC = "http://purl.org/dc/elements/1.1/"; + private static final String DC = "http://purl.org/dc/elements/1.1/"; /** * dc:publisher */ - final URI DC_PUBLISHER = new URIImpl(DC+"publisher"); + private static final URI DC_PUBLISHER = new URIImpl(DC+"publisher"); - /** * 8.2.1 Specifying the Default Graph * @@ -1709,5 +1710,108 @@ } + /** + * Unit test for case where there is a single named group which will be + * visited by the query. In this case, the decision tree says that we should + * bind the context position to the {@link IV} for that graph in order to + * restrict the query to exactly the desired graph (rather than using an + * expander pattern). However, the graph variable also needs to become bound + * in the query solutions. If we simply replace the graph variable with a + * constant, then the as-bound value of the graph variable will not be + * reported in the result set. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/359 + * + * @throws RepositoryException + * @throws SailException + * @throws MalformedQueryException + * @throws QueryEvaluationException + * @throws IOException + */ + public void test_ticket_359() throws RepositoryException, SailException, + MalformedQueryException, QueryEvaluationException, IOException { + if (log.isInfoEnabled()) + log.info("testing: range count predictes graph variable will have exactly one solution."); + + final BigdataSail sail = getSail(); + try { + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = (BigdataSailRepositoryConnection) repo + .getConnection(); + try { + + cxn.setAutoCommit(false); + + final BNode a = new BNodeImpl("_:a"); + final BNode b = new BNodeImpl("_:b"); +// final BNode z = new BNodeImpl("_:z"); + final URI alice = new URIImpl( + "http://example.org/foaf/aliceFoaf"); +// final URI bob = new URIImpl("http://example.org/foaf/bobFoaf"); + + cxn.add(a, FOAF_NAME, new 
LiteralImpl("Alice"), alice); + cxn.add(a, FOAF_MBOX, new URIImpl("mailto:alice@work.example"), + alice); + cxn.add(a, FOAF_KNOWS, b, alice); + cxn.add(b, FOAF_NAME, new LiteralImpl("Bob"), alice); + cxn.add(b, FOAF_MBOX, new URIImpl("mailto:bob@work.example"), + alice); + cxn.add(b, FOAF_NICK, new LiteralImpl("Bobby"), alice); + cxn.add(a, RDF.TYPE, FOAF_PPD, alice); +// cxn.add(b, RDFS.SEEALSO, bob, alice); +// cxn.add(bob, RDF.TYPE, FOAF_PPD, alice); +// cxn.add(z, FOAF_MBOX, new URIImpl("mailto:bob@work.example"), +// bob); +// cxn.add(z, RDFS.SEEALSO, bob, bob); +// cxn.add(z, FOAF_NICK, new LiteralImpl("Robert"), bob); +// cxn.add(bob, RDF.TYPE, FOAF_PPD, bob); + cxn.commit(); + if (log.isInfoEnabled()) { + log.info("\n" + sail.getDatabase().dumpStore().toString()); + } + + final String query = "PREFIX data: <http://example.org/foaf/> " + + "PREFIX foaf: <http://xmlns.com/foaf/0.1/> " + + "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> " + + "SELECT ?mbox ?nick ?ppd " + + "FROM NAMED <http://example.org/foaf/aliceFoaf> " + + "WHERE " + + "{ " + + " GRAPH ?ppd " + + " { " + + " ?alice foaf:mbox <mailto:alice@work.example> ; " + + " foaf:knows ?whom . 
" + + " ?whom foaf:mbox ?mbox ; " + + " foaf:nick ?nick " // + + " } " // + + "}"// + ; + + final TupleQuery tupleQuery = cxn.prepareTupleQuery( + QueryLanguage.SPARQL, query); + tupleQuery.setIncludeInferred(true /* includeInferred */); + final TupleQueryResult result = tupleQuery.evaluate(); + + final Collection<BindingSet> answer = new LinkedList<BindingSet>(); + answer.add(createBindingSet(// + new BindingImpl("mbox", new URIImpl( + "mailto:bob@work.example")),// + new BindingImpl("nick", new LiteralImpl("Bobby")), + new BindingImpl("ppd", alice))// + ); + + compare(result, answer); + + } finally { + cxn.close(); + + } + } finally { + sail.__tearDownUnitTest(); + } + + } + } Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java 2011-08-03 11:29:33 UTC (rev 5012) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java 2011-08-03 12:41:59 UTC (rev 5013) @@ -1253,9 +1253,22 @@ } if (summary.nknown == -1) { +// if (summary.nknown == 1) { /* * The dataset contains exactly one graph. Bind C. + * + * FIXME This code path has been effectively disabled per the ticket + * identified immediately below by comparing nknown with -1 rather + * than with 1. We currently lack a means to convey a binding for + * the graph variable without rewriting the predicate such that the + * graph variable is effectively removed from the query. However, I + * think that we might have a solution for that when we get into the + * RTO integration. In the meanwhile, the comparison with -1 ensures + * that we continue to produce the correct solutions. Enabling this + * code path in the future might provide a slight performance gain. 
+ * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/359 */ pred = pred.asBound((IVariable<?>) pred.get(3), Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNamedGraphs.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNamedGraphs.java 2011-08-03 11:29:33 UTC (rev 5012) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNamedGraphs.java 2011-08-03 12:41:59 UTC (rev 5013) @@ -29,6 +29,7 @@ import java.io.IOException; import java.util.Collection; import java.util.LinkedList; + import org.apache.log4j.Logger; import org.openrdf.model.BNode; import org.openrdf.model.Resource; @@ -48,6 +49,8 @@ import org.openrdf.query.impl.BindingImpl; import org.openrdf.repository.RepositoryException; import org.openrdf.sail.SailException; + +import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.store.BD; /** @@ -60,7 +63,7 @@ */ public class TestNamedGraphs extends QuadsTestCase { - protected static final Logger log = Logger.getLogger(TestNamedGraphs.class); + private static final Logger log = Logger.getLogger(TestNamedGraphs.class); /** * @@ -78,44 +81,43 @@ /** * The foaf: namespace. 
*/ - final String FOAF = "http://xmlns.com/foaf/0.1/"; + private static final String FOAF = "http://xmlns.com/foaf/0.1/"; /** * foaf:name */ - final URI FOAF_NAME = new URIImpl(FOAF+"name"); + private static final URI FOAF_NAME = new URIImpl(FOAF+"name"); /** * foaf:mbox */ - final URI FOAF_MBOX = new URIImpl(FOAF+"mbox"); + private static final URI FOAF_MBOX = new URIImpl(FOAF+"mbox"); /** * foaf:nick */ - final URI FOAF_NICK = new URIImpl(FOAF+"nick"); + private static final URI FOAF_NICK = new URIImpl(FOAF+"nick"); /** * foaf:PersonalProfileDocument */ - final URI FOAF_PPD = new URIImpl(FOAF+"PersonalProfileDocument"); + private static final URI FOAF_PPD = new URIImpl(FOAF+"PersonalProfileDocument"); /** * foaf:knows */ - final URI FOAF_KNOWS = new URIImpl(FOAF+"knows"); + private static final URI FOAF_KNOWS = new URIImpl(FOAF+"knows"); /** * The dc: namespace. */ - final String DC = "http://purl.org/dc/elements/1.1/"; + private static final String DC = "http://purl.org/dc/elements/1.1/"; /** * dc:publisher */ - final URI DC_PUBLISHER = new URIImpl(DC+"publisher"); + private static final URI DC_PUBLISHER = new URIImpl(DC+"publisher"); - /** * 8.2.1 Specifying the Default Graph * @@ -1709,5 +1711,108 @@ } + /** + * Unit test for case where there is a single named group which will be + * visited by the query. In this case, the decision tree says that we should + * bind the context position to the {@link IV} for that graph in order to + * restrict the query to exactly the desired graph (rather than using an + * expander pattern). However, the graph variable also needs to become bound + * in the query solutions. If we simply replace the graph variable with a + * constant, then the as-bound value of the graph variable will not be + * reported in the result set. 
+ * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/359 + * + * @throws RepositoryException + * @throws SailException + * @throws MalformedQueryException + * @throws QueryEvaluationException + * @throws IOException + */ + public void test_ticket_359() throws RepositoryException, SailException, + MalformedQueryException, QueryEvaluationException, IOException { + if (log.isInfoEnabled()) + log.info("testing: range count predictes graph variable will have exactly one solution."); + + final BigdataSail sail = getSail(); + try { + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = (BigdataSailRepositoryConnection) repo + .getConnection(); + try { + + cxn.setAutoCommit(false); + + final BNode a = new BNodeImpl("_:a"); + final BNode b = new BNodeImpl("_:b"); +// final BNode z = new BNodeImpl("_:z"); + final URI alice = new URIImpl( + "http://example.org/foaf/aliceFoaf"); +// final URI bob = new URIImpl("http://example.org/foaf/bobFoaf"); + + cxn.add(a, FOAF_NAME, new LiteralImpl("Alice"), alice); + cxn.add(a, FOAF_MBOX, new URIImpl("mailto:alice@work.example"), + alice); + cxn.add(a, FOAF_KNOWS, b, alice); + cxn.add(b, FOAF_NAME, new LiteralImpl("Bob"), alice); + cxn.add(b, FOAF_MBOX, new URIImpl("mailto:bob@work.example"), + alice); + cxn.add(b, FOAF_NICK, new LiteralImpl("Bobby"), alice); + cxn.add(a, RDF.TYPE, FOAF_PPD, alice); +// cxn.add(b, RDFS.SEEALSO, bob, alice); +// cxn.add(bob, RDF.TYPE, FOAF_PPD, alice); +// cxn.add(z, FOAF_MBOX, new URIImpl("mailto:bob@work.example"), +// bob); +// cxn.add(z, RDFS.SEEALSO, bob, bob); +// cxn.add(z, FOAF_NICK, new LiteralImpl("Robert"), bob); +// cxn.add(bob, RDF.TYPE, FOAF_PPD, bob); + cxn.commit(); + if (log.isInfoEnabled()) { + log.info("\n" + sail.getDatabase().dumpStore().toString()); + } + + final String query = "PREFIX data: <http://example.org/foaf/> " + + "PREFIX foaf: <http://xmlns.com/foaf/0.1/> " + + "PREFIX rdfs: 
<http://www.w3.org/2000/01/rdf-schema#> " + + "SELECT ?mbox ?nick ?ppd " + + "FROM NAMED <http://example.org/foaf/aliceFoaf> " + + "WHERE " + + "{ " + + " GRAPH ?ppd " + + " { " + + " ?alice foaf:mbox <mailto:alice@work.example> ; " + + " foaf:knows ?whom . " + + " ?whom foaf:mbox ?mbox ; " + + " foaf:nick ?nick " // + + " } " // + + "}"// + ; + + final TupleQuery tupleQuery = cxn.prepareTupleQuery( + QueryLanguage.SPARQL, query); + tupleQuery.setIncludeInferred(true /* includeInferred */); + final TupleQueryResult result = tupleQuery.evaluate(); + + final Collection<BindingSet> answer = new LinkedList<BindingSet>(); + answer.add(createBindingSet(// + new BindingImpl("mbox", new URIImpl( + "mailto:bob@work.example")),// + new BindingImpl("nick", new LiteralImpl("Bobby")), + new BindingImpl("ppd", alice))// + ); + + compare(result, answer); + + } finally { + cxn.close(); + + } + } finally { + sail.__tearDownUnitTest(); + } + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-08-03 12:46:20
|
Revision: 5014 http://bigdata.svn.sourceforge.net/bigdata/?rev=5014&view=rev Author: thompsonbry Date: 2011-08-03 12:46:14 +0000 (Wed, 03 Aug 2011) Log Message: ----------- Updated the 1.0.1 release notes to reflect recently closed issues. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt 2011-08-03 12:41:59 UTC (rev 5013) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_1.txt 2011-08-03 12:46:14 UTC (rev 5014) @@ -58,6 +58,17 @@ - https://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when comparing with non materialized value). + - https://sourceforge.net/apps/trac/bigdata/ticket/357 (RWStore reports + "FixedAllocator returning null address, with freeBits".) + + - https://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern + fails to bind graph variable if only one binding exists.) + + - https://sourceforge.net/apps/trac/bigdata/ticket/362 (log4j - slf4j bridge.) + +Note: Some of these bug fixes require data migration. For details, see +https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration + New features: - Single machine data storage to ~50B triples/quads (RWStore); Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt 2011-08-03 12:41:59 UTC (rev 5013) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/releases/RELEASE_1_0_1.txt 2011-08-03 12:46:14 UTC (rev 5014) @@ -58,6 +58,17 @@ - https://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when comparing with non materialized value). 
+ - https://sourceforge.net/apps/trac/bigdata/ticket/357 (RWStore reports + "FixedAllocator returning null address, with freeBits".) + + - https://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern + fails to bind graph variable if only one binding exists.) + + - https://sourceforge.net/apps/trac/bigdata/ticket/362 (log4j - slf4j bridge.) + +Note: Some of these bug fixes require data migration. For details, see +https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration + New features: - Single machine data storage to ~50B triples/quads (RWStore); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-08-04 13:46:26
|
Revision: 5019 http://bigdata.svn.sourceforge.net/bigdata/?rev=5019&view=rev Author: thompsonbry Date: 2011-08-04 13:46:19 +0000 (Thu, 04 Aug 2011) Log Message: ----------- javadoc (spelling error) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/Journal.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/Journal.java 2011-08-04 12:00:22 UTC (rev 5018) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/Journal.java 2011-08-04 13:46:19 UTC (rev 5019) @@ -1055,7 +1055,7 @@ } /** - * Note: The transaction service si shutdown first, then the + * Note: The transaction service is shutdown first, then the * {@link #executorService}, then the {@link IConcurrencyManager}, the * {@link ITransactionService} and finally the {@link IResourceLockService}. */ Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java 2011-08-04 12:00:22 UTC (rev 5018) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java 2011-08-04 13:46:19 UTC (rev 5019) @@ -1055,7 +1055,7 @@ } /** - * Note: The transaction service si shutdown first, then the + * Note: The transaction service is shutdown first, then the * {@link #executorService}, then the {@link IConcurrencyManager}, the * {@link ITransactionService} and finally the {@link IResourceLockService}. */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-08-04 14:03:10
|
Revision: 5020 http://bigdata.svn.sourceforge.net/bigdata/?rev=5020&view=rev Author: thompsonbry Date: 2011-08-04 14:03:00 +0000 (Thu, 04 Aug 2011) Log Message: ----------- Bug fix for https://sourceforge.net/apps/trac/bigdata/ticket/361 (IRunningQuery does not terminate promptly). I have reduced the log level of the RejectedExecutionException from ERROR to WARN. This exception is not really an error per say, especially for a Journal. In a cluster this situation might cause problems with the termination of a query. Basically, the situation is that we are unable to notify a QueryEngine on a node that some evaluation pass of an operator for a query is finished. The QueryEngine serving as the controller for that query needs to observe those events to decide when the query is "done". So, if one node shuts down (rather than dies) while a part of the query is running on that node, then the controller might not receive the notice. Hence the log @ ERROR as a clue for a query which is not terminating properly. Such queries can be cancelled on the controller using their queryId and QueryEngine.getRunningQuery() to obtain the IRunningQuery and then invoking cancel() on that. We plan to add some administrative features to the NanoSparqlServer for this sort of thing (detecting and canceling long running queries). However, in all likelihood such queries will have RMI errors which lead to termination since the peer QueryEngine is dead. {{{ try { t.context.getExecutorService().execute( new SendHaltMessageTask(getQueryController(), msg, ChunkedRunningQuery.this)); } catch (RejectedExecutionException ex) { // e.g., service is shutting down. log.error("Could not send message: " + msg, ex); } }}} Anyway, that is not really the source of this problem. One problem was in AbstractChunkedResolverator.ChunkConsumerTask.call(). The loop there was not terminating as quickly as it should when the sink is closed. 
Fixing that now results in the RejectedExecutionException error coming up more quickly. The modified loop looks like this -- it explicitly checks buffer.isOpen(). {{{ // while buffer (aka sink) is open and source has more data. while (buffer.isOpen() && src.hasNext()) { // fetch the next chunk (already available). final E[] chunk = src.nextChunk(); if (!buffer.isOpen()) { /* * Asynchronous close of the sink. By checking * buffer.isOpen() here and in the while() clause, we * will notice a closed sink more rapidly and close * the source in a more timely manner. * * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 */ break; } final F[] converted = resolveChunk(chunk); assert converted.length == chunk.length; // Note: Throws BufferClosedException if closed. buffer.add(converted); nchunks++; nelements += chunk.length; if (log.isDebugEnabled()) log.debug("nchunks=" + nchunks + ", chunkSize=" + chunk.length); } final long elapsed = (System.currentTimeMillis() - begin); if (log.isInfoEnabled()) log.info("Finished: nchunks=" + nchunks + ", nelements=" + nelements + ", elapsed=" + elapsed + "ms, sink.open" + buffer.isOpen()); return nelements; }}} However, the real problem was in AbstractChunkedResolverator#close(). The modified code is: {{{ public void close() { if (log.isInfoEnabled()) log.info("lastIndex=" + lastIndex + ", chunkSize=" + (chunk != null ? "" + chunk.length : "N/A")); /* * Explicitly close the source since we will not be reading anything * more from it. * * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 */ src.close(); /* * Close the sink as well. The thread draining the sink's iterator will * notice that it has been closed. It will still drain anything already * buffered in the sink, but then the iterator() will report that no * more data is available rather than blocking. */ buffer.close(); chunk = null; } }}} With that change the test no longer logs the RejectedExecutionException because the IRunningQuery was correctly terminated. 
I have also modified TestTicket361 to fail if the query is not terminated using an assert on the #of running queries on the QueryEngine. If you comment out the src.close() in AbstractChunkedResolverator#close(), you can see that the test fails. TestTicket361 is now run from the quads test suite. I also made some minor modifications to Sesame2BigdataIterator to ensure that it complains if next() is called after close(). However, this does not appear to have been related to the problem. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java Added Paths: ----------- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2011-08-04 13:46:19 UTC (rev 5019) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2011-08-04 14:03:00 UTC (rev 5020) @@ 
-728,7 +728,7 @@ ChunkedRunningQuery.this)); } catch (RejectedExecutionException ex) { // e.g., service is shutting down. - log.error("Could not send message: " + msg, ex); + log.warn("Could not send message: " + msg, ex); } } catch (Throwable ex1) { Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java 2011-08-04 13:46:19 UTC (rev 5019) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java 2011-08-04 14:03:00 UTC (rev 5020) @@ -185,7 +185,6 @@ * a queue. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ private class ChunkConsumerTask implements Callable<Long> { @@ -209,23 +208,38 @@ long nchunks = 0; long nelements = 0; - - while (src.hasNext()) { - // fetch the next chunk. + // while buffer (aka sink) is open and source has more data. + while (buffer.isOpen() && src.hasNext()) { + + // fetch the next chunk (already available). final E[] chunk = src.nextChunk(); + if (!buffer.isOpen()) { + /* + * Asynchronous close of the sink. By checking + * buffer.isOpen() here and in the while() clause, we + * will notice a closed sink more rapidly and close + * the source in a more timely manner. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 + */ + break; + } + final F[] converted = resolveChunk(chunk); assert converted.length == chunk.length; + // Note: Throws BufferClosedException if closed. 
buffer.add(converted); nchunks++; nelements += chunk.length; if (log.isDebugEnabled()) - log.debug("nchunks="+nchunks+", chunkSize="+chunk.length); + log.debug("nchunks=" + nchunks + ", chunkSize=" + + chunk.length); } @@ -233,7 +247,8 @@ if (log.isInfoEnabled()) log.info("Finished: nchunks=" + nchunks + ", nelements=" - + nelements + ", elapsed=" + elapsed + "ms"); + + nelements + ", elapsed=" + elapsed + + "ms, sink.open" + buffer.isOpen()); return nelements; @@ -338,11 +353,20 @@ log.info("lastIndex=" + lastIndex + ", chunkSize=" + (chunk != null ? "" + chunk.length : "N/A")); - /* - * Asynchronous close by the consumer of the producer's buffer. This - * will cause the ChunkConsumerTask to abort if it is still running and - * that will cause the [src] to be closed. - */ + /* + * Explicitly close the source since we will not be reading anything + * more from it. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 + */ + src.close(); + + /* + * Close the sink as well. The thread draining the sink's iterator will + * notice that it has been closed. It will still drain anything already + * buffered in the sink, but then the iterator() will report that no + * more data is available rather than blocking. + */ buffer.close(); chunk = null; Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java 2011-08-04 13:46:19 UTC (rev 5019) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java 2011-08-04 14:03:00 UTC (rev 5020) @@ -27,6 +27,8 @@ package com.bigdata.rdf.sail; +import java.util.NoSuchElementException; + import info.aduna.iteration.CloseableIteration; import com.bigdata.striterator.ICloseableIterator; @@ -49,6 +51,8 @@ private final CloseableIteration<? 
extends T,E> src; + private volatile boolean open = true; + public Sesame2BigdataIterator(final CloseableIteration<? extends T,E> src) { if (src == null) @@ -59,17 +63,23 @@ } public void close() { - - try { - src.close(); - } catch(Exception e) { - throw new RuntimeException(e); + + if (open) { + open = false; + try { + src.close(); + } catch (Exception e) { + throw new RuntimeException(e); + } } } public boolean hasNext() { - + + if (!open) + return false; + try { return src.hasNext(); } catch(Exception e) { @@ -79,7 +89,11 @@ } public T next() { - + + if (!hasNext()) { + throw new NoSuchElementException(); + } + try { return src.next(); } catch(Exception e) { @@ -90,6 +104,9 @@ public void remove() { + if(!open) + throw new IllegalStateException(); + try { src.remove(); } catch(Exception e) { Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-08-04 13:46:19 UTC (rev 5019) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-08-04 14:03:00 UTC (rev 5020) @@ -123,6 +123,7 @@ suite.addTestSuite(com.bigdata.rdf.sail.TestTicket352.class); suite.addTestSuite(com.bigdata.rdf.sail.TestTicket353.class); suite.addTestSuite(com.bigdata.rdf.sail.TestTicket355.class); + suite.addTestSuite(com.bigdata.rdf.sail.TestTicket361.class); suite.addTestSuite(com.bigdata.rdf.sail.DavidsTestBOps.class); Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java 2011-08-04 13:46:19 UTC (rev 5019) +++ 
branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java 2011-08-04 14:03:00 UTC (rev 5020) @@ -28,7 +28,10 @@ import java.util.Arrays; import java.util.Collection; import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.UUID; +import org.apache.log4j.Logger; import org.openrdf.model.impl.LiteralImpl; import org.openrdf.model.impl.URIImpl; import org.openrdf.query.BindingSet; @@ -42,6 +45,8 @@ import org.openrdf.query.algebra.evaluation.QueryBindingSet; import org.openrdf.sail.SailException; +import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.bop.fed.QueryEngineFactory; import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; /** @@ -67,6 +72,8 @@ */ public class TestTicket361 extends QuadsTestCase { + private final static Logger log = Logger.getLogger(TestTicket361.class); + public TestTicket361() { } @@ -83,72 +90,151 @@ conn.addStatement(new URIImpl("s:1"), new URIImpl("p:1"), new LiteralImpl("l1")); conn.addStatement(new URIImpl("s:2"), new URIImpl("p:2"), new LiteralImpl("l1")); conn.addStatement(new URIImpl("s:3"), new URIImpl("p:3"), new LiteralImpl("l3")); - query(conn); + CloseableIteration<? extends BindingSet, QueryEvaluationException> results = null; + try { + // submit query + results = query(conn); + } finally { + if (results != null) { + // immediately close the query result iteration. + log.info("Closing query result iteration."); + results.close(); + } + } } finally { - System.out.println("Closing connection"); + log.info("Closing connection."); conn.close(); } } finally { - System.out.println("Shutting down sail"); + final QueryEngine queryEngine = QueryEngineFactory + .getExistingQueryController(sail.getDatabase() + .getIndexManager()); + if (queryEngine != null) { + /* + * Note: The query engine should shutdown automatically once it + * is finalized. 
This protects against a shutdown when there are + * concurrent users, e.g., different sails against the same + * Journal instance. However, if there are any queries still + * running on the QueryEngine when the backing IIndexManager + * shuts down its ExecutorService, then a + * RejectedExecutionException will be logged. In general, this + * can be safely ignored. + */ + final UUID[] uuids = queryEngine.getRunningQueries(); + assertEquals("Query not terminated: " + Arrays.toString(uuids), + new UUID[0], uuids); +// log.info("Shutting down QueryEngine"); +// queryEngine.shutdown(); + } + log.info("Shutting down sail"); sail.shutDown(); - System.out.println("Tear down"); + log.info("Tear down"); sail.__tearDownUnitTest(); } } - private void query(final BigdataSailConnection conn) throws SailException, QueryEvaluationException { - final ProjectionElemList elemList = new ProjectionElemList(new ProjectionElem("z")); - final TupleExpr query = new Projection(new StatementPattern(new Var("s"), new Var("p"), new Var("o")), elemList); - final QueryBindingSet bindings = mb("o", "l1", "o", "l2", "o", "l3"); - final CloseableIteration<? extends BindingSet, QueryEvaluationException> results; - results = conn.evaluate(query, null, new QueryBindingSet(), new Iter(bindings), false, null); - results.close(); + private CloseableIteration<? extends BindingSet, QueryEvaluationException> query( + final BigdataSailConnection conn) throws SailException, + QueryEvaluationException { + + final ProjectionElemList elemList = new ProjectionElemList( + new ProjectionElem("z")); + + final TupleExpr query = new Projection(new StatementPattern( + new Var("s"), new Var("p"), new Var("o")), elemList); + + final QueryBindingSet bindings = mb("o", "l1", "o1", "l2", "o2", "l3"); + + return conn.evaluate(query, null, new QueryBindingSet(), new Iter( + bindings), false, null); + } - + /** - * Makes a binding set by taking each pair of values and using the first value as name and the second as value. 
- * Creates an URI for a value with a ':' in it, or a Literal for a value without a ':'. + * Makes a binding set by taking each pair of values and using the first + * value as name and the second as value. Creates an URI for a value with a + * ':' in it, or a Literal for a value without a ':'. */ - private QueryBindingSet mb(String... nameValuePairs) { + private QueryBindingSet mb(final String... nameValuePairs) { final QueryBindingSet bs = new QueryBindingSet(); for (int i = 0; i < nameValuePairs.length; i += 2) - bs.addBinding(nameValuePairs[i], nameValuePairs[i + 1].indexOf(':') > 0 ? new URIImpl(nameValuePairs[i + 1]) : new LiteralImpl(nameValuePairs[i + 1])); + bs.addBinding(nameValuePairs[i], + nameValuePairs[i + 1].indexOf(':') > 0 ? new URIImpl( + nameValuePairs[i + 1]) : new LiteralImpl( + nameValuePairs[i + 1])); return bs; } /** * Iterates over the given bindings. */ - private static class Iter implements CloseableIteration<BindingSet, QueryEvaluationException> { + private static class Iter implements + CloseableIteration<BindingSet, QueryEvaluationException> { + final private Iterator<BindingSet> iter; + private volatile boolean open = true; - private Iter(Collection<BindingSet> bindings) { + private Iter(final Collection<BindingSet> bindings) { + this.iter = bindings.iterator(); + } - private Iter(BindingSet... bindings) { + private Iter(final BindingSet... bindings) { + this(Arrays.asList(bindings)); + } - +// private int ncalls = 0; public boolean hasNext() throws QueryEvaluationException { +// log.error("Callers: ",new RuntimeException("caller#"+(++ncalls))); try { + /* + * Note: hasNext() is called ~ 6 times during the test, so this + * timeout gets multiplied. 
+ */ Thread.sleep(1000); } catch (InterruptedException e) { throw new RuntimeException(e); } + + if (!open) + return false; + return iter.hasNext(); + } public BindingSet next() throws QueryEvaluationException { + + if (!hasNext()) + throw new NoSuchElementException(); + return iter.next(); + } public void remove() throws QueryEvaluationException { + + if (!open) + throw new IllegalStateException(); + iter.remove(); + } public void close() throws QueryEvaluationException { - System.out.println("Source iteration closed"); + + if (open) { + + open = false; + + log.info("Source iteration closed"); + + } + } + } + } Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2011-08-04 13:46:19 UTC (rev 5019) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2011-08-04 14:03:00 UTC (rev 5020) @@ -728,7 +728,7 @@ ChunkedRunningQuery.this)); } catch (RejectedExecutionException ex) { // e.g., service is shutting down. - log.error("Could not send message: " + msg, ex); + log.warn("Could not send message: " + msg, ex); } } catch (Throwable ex1) { Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java 2011-08-04 13:46:19 UTC (rev 5019) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java 2011-08-04 14:03:00 UTC (rev 5020) @@ -132,17 +132,17 @@ if (resolvedItr != null) throw new IllegalStateException(); - /* - * Create a task which reads chunks from the source iterator and writes - * resolved chunks on the buffer. 
- */ - final FutureTask<Long> ft = new FutureTask<Long>( - new ChunkConsumerTask()); + /* + * Create a task which reads chunks from the source iterator and writes + * resolved chunks on the buffer. + */ + final FutureTask<Long> ft = new FutureTask<Long>( + new ChunkConsumerTask()); - /* - * Set the future for that task on the buffer. - */ - buffer.setFuture(ft); + /* + * Set the future for that task on the buffer. + */ + buffer.setFuture(ft); /* * This class will read resolved chunks from the [resolvedItr] and then @@ -150,8 +150,8 @@ */ resolvedItr = buffer.iterator(); - // Submit the task for execution. - service.execute(ft); + // Submit the task for execution. + service.execute(ft); // /* // * Create and run a task which reads chunks from the source iterator and @@ -185,7 +185,6 @@ * a queue. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ private class ChunkConsumerTask implements Callable<Long> { @@ -209,23 +208,38 @@ long nchunks = 0; long nelements = 0; - - while (src.hasNext()) { - // fetch the next chunk. + // while buffer (aka sink) is open and source has more data. + while (buffer.isOpen() && src.hasNext()) { + + // fetch the next chunk (already available). final E[] chunk = src.nextChunk(); + if (!buffer.isOpen()) { + /* + * Asynchronous close of the sink. By checking + * buffer.isOpen() here and in the while() clause, we + * will notice a closed sink more rapidly and close + * the source in a more timely manner. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 + */ + break; + } + final F[] converted = resolveChunk(chunk); assert converted.length == chunk.length; + // Note: Throws BufferClosedException if closed. 
buffer.add(converted); nchunks++; nelements += chunk.length; if (log.isDebugEnabled()) - log.debug("nchunks="+nchunks+", chunkSize="+chunk.length); + log.debug("nchunks=" + nchunks + ", chunkSize=" + + chunk.length); } @@ -233,17 +247,18 @@ if (log.isInfoEnabled()) log.info("Finished: nchunks=" + nchunks + ", nelements=" - + nelements + ", elapsed=" + elapsed + "ms"); + + nelements + ", elapsed=" + elapsed + + "ms, sink.open" + buffer.isOpen()); return nelements; } finally { - try { - src.close(); - } finally { - buffer.close(); - } + try { + src.close(); + } finally { + buffer.close(); + } } @@ -338,11 +353,20 @@ log.info("lastIndex=" + lastIndex + ", chunkSize=" + (chunk != null ? "" + chunk.length : "N/A")); - /* - * Asynchronous close by the consumer of the producer's buffer. This - * will cause the ChunkConsumerTask to abort if it is still running and - * that will cause the [src] to be closed. - */ + /* + * Explicitly close the source since we will not be reading anything + * more from it. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 + */ + src.close(); + + /* + * Close the sink as well. The thread draining the sink's iterator will + * notice that it has been closed. It will still drain anything already + * buffered in the sink, but then the iterator() will report that no + * more data is available rather than blocking. 
+ */ buffer.close(); chunk = null; Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java 2011-08-04 13:46:19 UTC (rev 5019) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java 2011-08-04 14:03:00 UTC (rev 5020) @@ -27,6 +27,8 @@ package com.bigdata.rdf.sail; +import java.util.NoSuchElementException; + import info.aduna.iteration.CloseableIteration; import com.bigdata.striterator.ICloseableIterator; @@ -49,6 +51,8 @@ private final CloseableIteration<? extends T,E> src; + private volatile boolean open = true; + public Sesame2BigdataIterator(final CloseableIteration<? extends T,E> src) { if (src == null) @@ -59,17 +63,23 @@ } public void close() { - - try { - src.close(); - } catch(Exception e) { - throw new RuntimeException(e); + + if (open) { + open = false; + try { + src.close(); + } catch (Exception e) { + throw new RuntimeException(e); + } } } public boolean hasNext() { - + + if (!open) + return false; + try { return src.hasNext(); } catch(Exception e) { @@ -79,7 +89,11 @@ } public T next() { - + + if (!hasNext()) { + throw new NoSuchElementException(); + } + try { return src.next(); } catch(Exception e) { @@ -90,6 +104,9 @@ public void remove() { + if(!open) + throw new IllegalStateException(); + try { src.remove(); } catch(Exception e) { Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-08-04 13:46:19 UTC (rev 5019) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-08-04 
14:03:00 UTC (rev 5020) @@ -123,6 +123,7 @@ suite.addTestSuite(com.bigdata.rdf.sail.TestTicket352.class); suite.addTestSuite(com.bigdata.rdf.sail.TestTicket353.class); suite.addTestSuite(com.bigdata.rdf.sail.TestTicket355.class); + suite.addTestSuite(com.bigdata.rdf.sail.TestTicket361.class); suite.addTestSuite(com.bigdata.rdf.sail.DavidsTestBOps.class); Added: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java (rev 0) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java 2011-08-04 14:03:00 UTC (rev 5020) @@ -0,0 +1,240 @@ +/** +Copyright (C) SYSTAP, LLC 2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +package com.bigdata.rdf.sail; + +import info.aduna.iteration.CloseableIteration; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.UUID; + +import org.apache.log4j.Logger; +import org.openrdf.model.impl.LiteralImpl; +import org.openrdf.model.impl.URIImpl; +import org.openrdf.query.BindingSet; +import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.algebra.Projection; +import org.openrdf.query.algebra.ProjectionElem; +import org.openrdf.query.algebra.ProjectionElemList; +import org.openrdf.query.algebra.StatementPattern; +import org.openrdf.query.algebra.TupleExpr; +import org.openrdf.query.algebra.Var; +import org.openrdf.query.algebra.evaluation.QueryBindingSet; +import org.openrdf.sail.SailException; + +import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.bop.fed.QueryEngineFactory; +import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; + +/** + * Unit test template for use in submission of bugs. + * <p> + * This test case will delegate to an underlying backing store. You can specify + * this store via a JVM property as follows: + * <code>-DtestClass=com.bigdata.rdf.sail.TestBigdataSailWithQuads</code> + * <p> + * There are three possible configurations for the testClass: + * <ul> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithQuads (quads mode)</li> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithoutSids (triples mode)</li> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithSids (SIDs mode)</li> + * </ul> + * <p> + * The default for triples and SIDs mode is for inference with truth maintenance + * to be on. If you would like to turn off inference, make sure to do so in + * {@link #getProperties()}. 
+ * + * @author <a href="mailto:mrp...@us...">Mike Personick</a> + * @version $Id$ + */ +public class TestTicket361 extends QuadsTestCase { + + private final static Logger log = Logger.getLogger(TestTicket361.class); + + public TestTicket361() { + } + + public TestTicket361(String arg0) { + super(arg0); + } + + public void testEvaluate() throws Exception { + final BigdataSail sail = getSail(); + try { + sail.initialize(); + final BigdataSailConnection conn = sail.getConnection(); + try { + conn.addStatement(new URIImpl("s:1"), new URIImpl("p:1"), new LiteralImpl("l1")); + conn.addStatement(new URIImpl("s:2"), new URIImpl("p:2"), new LiteralImpl("l1")); + conn.addStatement(new URIImpl("s:3"), new URIImpl("p:3"), new LiteralImpl("l3")); + CloseableIteration<? extends BindingSet, QueryEvaluationException> results = null; + try { + // submit query + results = query(conn); + } finally { + if (results != null) { + // immediately close the query result iteration. + log.info("Closing query result iteration."); + results.close(); + } + } + } finally { + log.info("Closing connection."); + conn.close(); + } + } finally { + final QueryEngine queryEngine = QueryEngineFactory + .getExistingQueryController(sail.getDatabase() + .getIndexManager()); + if (queryEngine != null) { + /* + * Note: The query engine should shutdown automatically once it + * is finalized. This protects against a shutdown when there are + * concurrent users, e.g., different sails against the same + * Journal instance. However, if there are any queries still + * running on the QueryEngine when the backing IIndexManager + * shuts down its ExecutorService, then a + * RejectedExecutionException will be logged. In general, this + * can be safely ignored. 
+ */ + final UUID[] uuids = queryEngine.getRunningQueries(); + assertEquals("Query not terminated: " + Arrays.toString(uuids), + new UUID[0], uuids); +// log.info("Shutting down QueryEngine"); +// queryEngine.shutdown(); + } + log.info("Shutting down sail"); + sail.shutDown(); + log.info("Tear down"); + sail.__tearDownUnitTest(); + } + } + + private CloseableIteration<? extends BindingSet, QueryEvaluationException> query( + final BigdataSailConnection conn) throws SailException, + QueryEvaluationException { + + final ProjectionElemList elemList = new ProjectionElemList( + new ProjectionElem("z")); + + final TupleExpr query = new Projection(new StatementPattern( + new Var("s"), new Var("p"), new Var("o")), elemList); + + final QueryBindingSet bindings = mb("o", "l1", "o1", "l2", "o2", "l3"); + + return conn.evaluate(query, null, new QueryBindingSet(), new Iter( + bindings), false, null); + + } + + /** + * Makes a binding set by taking each pair of values and using the first + * value as name and the second as value. Creates an URI for a value with a + * ':' in it, or a Literal for a value without a ':'. + */ + private QueryBindingSet mb(final String... nameValuePairs) { + final QueryBindingSet bs = new QueryBindingSet(); + for (int i = 0; i < nameValuePairs.length; i += 2) + bs.addBinding(nameValuePairs[i], + nameValuePairs[i + 1].indexOf(':') > 0 ? new URIImpl( + nameValuePairs[i + 1]) : new LiteralImpl( + nameValuePairs[i + 1])); + return bs; + } + + /** + * Iterates over the given bindings. + */ + private static class Iter implements + CloseableIteration<BindingSet, QueryEvaluationException> { + + final private Iterator<BindingSet> iter; + private volatile boolean open = true; + + private Iter(final Collection<BindingSet> bindings) { + + this.iter = bindings.iterator(); + + } + + private Iter(final BindingSet... 
bindings) { + + this(Arrays.asList(bindings)); + + } +// private int ncalls = 0; + public boolean hasNext() throws QueryEvaluationException { +// log.error("Callers: ",new RuntimeException("caller#"+(++ncalls))); + try { + /* + * Note: hasNext() is called ~ 6 times during the test, so this + * timeout gets multiplied. + */ + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + if (!open) + return false; + + return iter.hasNext(); + + } + + public BindingSet next() throws QueryEvaluationException { + + if (!hasNext()) + throw new NoSuchElementException(); + + return iter.next(); + + } + + public void remove() throws QueryEvaluationException { + + if (!open) + throw new IllegalStateException(); + + iter.remove(); + + } + + public void close() throws QueryEvaluationException { + + if (open) { + + open = false; + + log.info("Source iteration closed"); + + } + + } + + } + +} Property changes on: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-08-04 15:44:29
|
Revision: 5021 http://bigdata.svn.sourceforge.net/bigdata/?rev=5021&view=rev Author: thompsonbry Date: 2011-08-04 15:44:21 +0000 (Thu, 04 Aug 2011) Log Message: ----------- If you modify com.bigdata.relation.accesspath.WrappedAsynchronousIterator#hasNext() as follows then the source iteration appears to be closed in a timely manner. {{{ public boolean hasNext() { if(open && src.hasNext()) return true; /* * Explicit close so we close the source as well when this is exhausted. * * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 */ close(); return false; } }}} This seems to me indicative of a pattern which we should be applying to both our ICloseableIterator and the openrdf ClosableIteration. We probably need to audit all implementations of both interfaces for this pattern. This commit contains the results of that code audit. See https://sourceforge.net/apps/trac/bigdata/ticket/361 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2Sesame2BindingSetIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2SesameIteration.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataConstructIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/HitConvertor.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/QueryEvaluationIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java 
branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2Sesame2BindingSetIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2SesameIteration.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataConstructIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/HitConvertor.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/QueryEvaluationIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -49,6 +49,8 @@ */ public class WrappedAsynchronousIterator<E,F> implements IAsynchronousIterator<E> { +// private static final Logger log = Logger.getLogger(WrappedAsynchronousIterator.class); + private transient boolean open = true; private final IChunkedIterator<F> src; @@ -79,8 +81,18 @@ public boolean hasNext() { - return open && src.hasNext(); + if(open && src.hasNext()) + return true; + /* + * Explicit close so we close the source as well when this is exhausted. 
+ * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 + */ + close(); + + return false; + } @SuppressWarnings("unchecked") @@ -110,7 +122,8 @@ open = false; // if (src instanceof ICloseableIterator<?>) { - +// if (log.isDebugEnabled()) +// log.debug("Close", new RuntimeException()); ((ICloseableIterator<?>) src).close(); // } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -45,12 +45,8 @@ */ public class ChunkedWrappedIterator<E> implements IChunkedOrderedIterator<E> { - protected static transient final Logger log = Logger.getLogger(ChunkedWrappedIterator.class); + private static transient final Logger log = Logger.getLogger(ChunkedWrappedIterator.class); - protected static final boolean INFO = log.isInfoEnabled(); - - protected static final boolean DEBUG = log.isDebugEnabled(); - private boolean open = true; private final Class<? extends E> elementClass; @@ -72,8 +68,8 @@ private final IKeyOrder<E> keyOrder; - /** Optional filter applied to the source iterator. */ - private final IElementFilter<E> filter; +// /** Optional filter applied to the source iterator. 
*/ +// private final IElementFilter<E> filter; private long nchunks = 0L; private long nelements = 0L; @@ -166,7 +162,7 @@ this.keyOrder = keyOrder; - this.filter = filter; +// this.filter = filter; } @@ -183,7 +179,7 @@ } - if(INFO) + if(log.isInfoEnabled()) log.info("#chunks="+nchunks+", #elements="+nelements); } @@ -193,10 +189,19 @@ */ public boolean hasNext() { - if(!open) return false; - - return src.hasNext(); + if(open && src.hasNext()) + return true; + /* + * Explicit close() so we close the source also when this iterator is + * exhausted. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 + */ + close(); + + return false; + } /** @@ -274,7 +279,7 @@ nchunks++; nelements += n; - if (DEBUG) + if (log.isDebugEnabled()) log.debug("#chunks=" + nchunks + ", chunkSize=" + chunk.length + ", #elements=" + nelements); Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2Sesame2BindingSetIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2Sesame2BindingSetIterator.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2Sesame2BindingSetIterator.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -5,19 +5,15 @@ import java.util.Iterator; import java.util.Map; import java.util.NoSuchElementException; -import java.util.Set; import org.apache.log4j.Logger; -import org.openrdf.model.Value; import org.openrdf.query.Binding; import org.openrdf.query.BindingSet; -import org.openrdf.query.impl.BindingImpl; import org.openrdf.query.impl.MapBindingSet; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstant; import com.bigdata.bop.IVariable; -import com.bigdata.bop.Var; import com.bigdata.rdf.model.BigdataValue; import com.bigdata.striterator.ICloseableIterator; @@ -44,6 +40,8 @@ private final BindingSet constants; + private boolean 
open = true; + /** * * @param src @@ -69,8 +67,13 @@ public boolean hasNext() throws E { - return src.hasNext(); + if(open && src.hasNext()) + return true; + close(); + + return false; + } public BindingSet next() throws E { @@ -142,7 +145,13 @@ public void close() throws E { - src.close(); + if(open) { + + open = false; + + src.close(); + + } } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2SesameIteration.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2SesameIteration.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2SesameIteration.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -27,6 +27,8 @@ package com.bigdata.rdf.sail; +import java.util.NoSuchElementException; + import info.aduna.iteration.CloseableIteration; import com.bigdata.striterator.ICloseableIterator; @@ -48,6 +50,8 @@ private final ICloseableIterator<? extends T> src; + private boolean open = true; + public Bigdata2SesameIteration(final ICloseableIterator<? 
extends T> src) { if (src == null) @@ -59,24 +63,41 @@ public void close() throws E { - src.close(); + if (open) { + + open = false; + + src.close(); + + } } public boolean hasNext() throws E { - return src.hasNext(); + if(open && src.hasNext()) + return true; + close(); + + return false; + } public T next() throws E { + if (!hasNext()) + throw new NoSuchElementException(); + return src.next(); } public void remove() throws E { + if(!open) + throw new IllegalStateException(); + src.remove(); } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataConstructIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataConstructIterator.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataConstructIterator.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -1,9 +1,12 @@ package com.bigdata.rdf.sail; import info.aduna.iteration.CloseableIteration; + import java.util.Collection; import java.util.Iterator; import java.util.LinkedList; +import java.util.NoSuchElementException; + import org.openrdf.model.Resource; import org.openrdf.model.Statement; import org.openrdf.model.URI; @@ -11,13 +14,13 @@ import org.openrdf.model.ValueFactory; import org.openrdf.query.BindingSet; import org.openrdf.query.QueryEvaluationException; + import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.model.BigdataValue; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.BigdataStatementIterator; -import com.bigdata.rdf.store.IRawTripleStore; import com.bigdata.striterator.ChunkedWrappedIterator; import com.bigdata.striterator.ICloseableIterator; @@ -35,6 +38,8 @@ private final ValueFactory vf; + private boolean open = true; + public BigdataConstructIterator( final AbstractTripleStore db, final 
CloseableIteration<? extends BindingSet, QueryEvaluationException> src, @@ -53,6 +58,17 @@ } public boolean hasNext() throws QueryEvaluationException { + + if(open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() throws QueryEvaluationException { if (stmtIt.hasNext()) { return true; } @@ -79,9 +95,15 @@ } public void close() throws QueryEvaluationException { + + if(open) { + + open = false; + + stmtIt.close(); + + } - stmtIt.close(); - } /** @@ -89,12 +111,12 @@ * * @param bindingSet */ - private void addToLeftovers(BindingSet bindingSet) { + private void addToLeftovers(final BindingSet bindingSet) { - Resource subject = (Resource)bindingSet.getValue("subject"); - URI predicate = (URI)bindingSet.getValue("predicate"); - Value object = bindingSet.getValue("object"); - Resource context = (Resource)bindingSet.getValue("context"); + final Resource subject = (Resource)bindingSet.getValue("subject"); + final URI predicate = (URI)bindingSet.getValue("predicate"); + final Value object = bindingSet.getValue("object"); + final Resource context = (Resource)bindingSet.getValue("context"); if (context == null) { leftovers.add(vf.createStatement(subject, predicate, object)); } @@ -110,6 +132,8 @@ private SPO next; + private boolean open = true; + public SPOConverter( final CloseableIteration<? extends BindingSet, QueryEvaluationException> src) { @@ -118,10 +142,13 @@ } public void close() { - try { - src.close(); - } catch (QueryEvaluationException ex) { - throw new RuntimeException(ex); + if (open) { + open = false; + try { + src.close(); + } catch (QueryEvaluationException ex) { + throw new RuntimeException(ex); + } } } @@ -131,6 +158,17 @@ * need to handle that separately (without trying to resolve an SPO). 
*/ public boolean hasNext() { + + if (open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() { try { // we already have our next lined up if (next != null) { @@ -159,14 +197,17 @@ public SPO next() { // getting the next is actually handled in hasNext() - hasNext(); + if(!hasNext()) + throw new NoSuchElementException(); // clear out the next so we can get a new one - SPO spo = next; + final SPO spo = next; next = null; return spo; } public void remove() { + if (!open) + throw new IllegalStateException(); try { src.remove(); } catch (QueryEvaluationException ex) { @@ -178,7 +219,7 @@ * Make sure all of the three positions are non-null - i.e. the terms * actually exist in the lexicon (not always the case with construct). */ - private boolean isValid(SPO spo) { + private boolean isValid(final SPO spo) { return spo.s != null && spo.p != null && spo.o != null; @@ -188,10 +229,10 @@ * Convert a bindingset into an SPO. All values should already be * bigdata values, we dont' use db.getTermId(Value). 
*/ - private SPO convert(BindingSet bindingSet) { - Value subject = bindingSet.getValue("subject"); - Value predicate = bindingSet.getValue("predicate"); - Value object = bindingSet.getValue("object"); + private SPO convert(final BindingSet bindingSet) { + final Value subject = bindingSet.getValue("subject"); + final Value predicate = bindingSet.getValue("predicate"); + final Value object = bindingSet.getValue("object"); IV s = null; if (subject instanceof BigdataValue) { s = ((BigdataValue) subject).getIV(); @@ -204,7 +245,7 @@ if (object instanceof BigdataValue) { o = ((BigdataValue) object).getIV(); } - SPO spo = new SPO(s, p, o); + final SPO spo = new SPO(s, p, o); return spo; } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -2611,14 +2611,25 @@ // return new CloseableIteration</*? 
extends*/ Resource, SailException>() { return new CloseableIteration<Resource, SailException>() { - Resource next = null; + private Resource next = null; + private boolean open = true; public void close() throws SailException { - next = null; - itr2.close(); + if (open) { + open = false; + next = null; + itr2.close(); + } } public boolean hasNext() throws SailException { + if(open && _hasNext()) + return true; + close(); + return false; + } + + private boolean _hasNext() throws SailException { if (next != null) return true; while (itr2.hasNext()) { Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/HitConvertor.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/HitConvertor.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/HitConvertor.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -46,8 +46,9 @@ final private Set<URI> graphs; private BigdataValue next; + + private boolean open = true; - @SuppressWarnings("unchecked") public HitConvertor(final AbstractTripleStore database, final Iterator<IHit> src, final Var svar, final BindingSet bindings) { @@ -104,7 +105,7 @@ * @param value * The value. */ - protected boolean isValid(BigdataValue value) { + protected boolean isValid(final BigdataValue value) { if (graphs != null) { // check each graph to see if the literal appears in a statement @@ -123,12 +124,29 @@ public void close() throws QueryEvaluationException { - src.close(); + if (open) { + + open = false; + + src.close(); + + } } public boolean hasNext() throws QueryEvaluationException { + if (open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() throws QueryEvaluationException { + if (next != null) return true; @@ -150,7 +168,6 @@ } - /** * Binds the next {@link BigdataValue} (must be a Literal). 
* Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/QueryEvaluationIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/QueryEvaluationIterator.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/QueryEvaluationIterator.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -1,5 +1,7 @@ package com.bigdata.rdf.sail; +import java.util.NoSuchElementException; + import info.aduna.iteration.CloseableIteration; import org.openrdf.query.QueryEvaluationException; @@ -17,6 +19,8 @@ private final CloseableIteration<? extends T, SailException> src; + private boolean open = true; + public QueryEvaluationIterator( CloseableIteration<? extends T, SailException> src) { @@ -28,6 +32,17 @@ public boolean hasNext() throws QueryEvaluationException { + if(open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() throws QueryEvaluationException { + try { return src.hasNext(); @@ -37,11 +52,14 @@ throw new QueryEvaluationException(ex); } - + } - + public T next() throws QueryEvaluationException { + if(!hasNext()) + throw new NoSuchElementException(); + try { return (T) src.next(); @@ -56,6 +74,9 @@ public void remove() throws QueryEvaluationException { + if (!open) + throw new IllegalStateException(); + try { src.remove(); @@ -65,19 +86,25 @@ throw new QueryEvaluationException(ex); } - + } - + public void close() throws QueryEvaluationException { - try { + if (open) { - src.close(); - - } catch(SailException ex) { - - throw new QueryEvaluationException(ex); - + open = false; + + try { + + src.close(); + + } catch (SailException ex) { + + throw new QueryEvaluationException(ex); + + } + } } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java 
=================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -30,6 +30,8 @@ * return <code>true</code>. */ private E current = null; + + private boolean open = true; public RunningQueryCloseableIteration(final IRunningQuery runningQuery, final CloseableIteration<E, X> src) { @@ -40,12 +42,26 @@ } public void close() throws X { - runningQuery.cancel(true/* mayInterruptIfRunning */); - src.close(); + if (open) { + open = false; + runningQuery.cancel(true/* mayInterruptIfRunning */); + src.close(); + } } - public boolean hasNext() throws X { + public boolean hasNext() throws X { + if (open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() throws X { + if (current != null) { // Already buffered. 
return true; Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -79,8 +79,13 @@ public boolean hasNext() { - return open && src.hasNext(); + if (open && src.hasNext()) + return true; + close(); + + return false; + } @SuppressWarnings("unchecked") Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -45,12 +45,8 @@ */ public class ChunkedWrappedIterator<E> implements IChunkedOrderedIterator<E> { - protected static transient final Logger log = Logger.getLogger(ChunkedWrappedIterator.class); + private static transient final Logger log = Logger.getLogger(ChunkedWrappedIterator.class); - protected static final boolean INFO = log.isInfoEnabled(); - - protected static final boolean DEBUG = log.isDebugEnabled(); - private boolean open = true; private final Class<? extends E> elementClass; @@ -72,8 +68,8 @@ private final IKeyOrder<E> keyOrder; - /** Optional filter applied to the source iterator. */ - private final IElementFilter<E> filter; +// /** Optional filter applied to the source iterator. 
*/ +// private final IElementFilter<E> filter; private long nchunks = 0L; private long nelements = 0L; @@ -166,7 +162,7 @@ this.keyOrder = keyOrder; - this.filter = filter; +// this.filter = filter; } @@ -183,7 +179,7 @@ } - if(INFO) + if(log.isInfoEnabled()) log.info("#chunks="+nchunks+", #elements="+nelements); } @@ -193,9 +189,18 @@ */ public boolean hasNext() { - if(!open) return false; + if(open && src.hasNext()) + return true; + + /* + * Explicit close() so we close the source also when this iterator is + * exhausted. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 + */ + close(); - return src.hasNext(); + return false; } @@ -274,7 +279,7 @@ nchunks++; nelements += n; - if (DEBUG) + if (log.isDebugEnabled()) log.debug("#chunks=" + nchunks + ", chunkSize=" + chunk.length + ", #elements=" + nelements); Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2Sesame2BindingSetIterator.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2Sesame2BindingSetIterator.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2Sesame2BindingSetIterator.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -5,19 +5,15 @@ import java.util.Iterator; import java.util.Map; import java.util.NoSuchElementException; -import java.util.Set; import org.apache.log4j.Logger; -import org.openrdf.model.Value; import org.openrdf.query.Binding; import org.openrdf.query.BindingSet; -import org.openrdf.query.impl.BindingImpl; import org.openrdf.query.impl.MapBindingSet; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstant; import com.bigdata.bop.IVariable; -import com.bigdata.bop.Var; import com.bigdata.rdf.model.BigdataValue; import com.bigdata.striterator.ICloseableIterator; @@ -44,6 +40,8 @@ private final BindingSet constants; + private boolean open = 
true; + /** * * @param src @@ -69,8 +67,13 @@ public boolean hasNext() throws E { - return src.hasNext(); + if(open && src.hasNext()) + return true; + close(); + + return false; + } public BindingSet next() throws E { @@ -142,7 +145,13 @@ public void close() throws E { - src.close(); + if(open) { + + open = false; + + src.close(); + + } } Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2SesameIteration.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2SesameIteration.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Bigdata2SesameIteration.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -27,6 +27,8 @@ package com.bigdata.rdf.sail; +import java.util.NoSuchElementException; + import info.aduna.iteration.CloseableIteration; import com.bigdata.striterator.ICloseableIterator; @@ -48,6 +50,8 @@ private final ICloseableIterator<? extends T> src; + private boolean open = true; + public Bigdata2SesameIteration(final ICloseableIterator<? 
extends T> src) { if (src == null) @@ -59,24 +63,41 @@ public void close() throws E { - src.close(); + if (open) { + + open = false; + + src.close(); + + } } public boolean hasNext() throws E { - return src.hasNext(); + if(open && src.hasNext()) + return true; + close(); + + return false; + } public T next() throws E { + if (!hasNext()) + throw new NoSuchElementException(); + return src.next(); } public void remove() throws E { + if(!open) + throw new IllegalStateException(); + src.remove(); } Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataConstructIterator.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataConstructIterator.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataConstructIterator.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -1,9 +1,12 @@ package com.bigdata.rdf.sail; import info.aduna.iteration.CloseableIteration; + import java.util.Collection; import java.util.Iterator; import java.util.LinkedList; +import java.util.NoSuchElementException; + import org.openrdf.model.Resource; import org.openrdf.model.Statement; import org.openrdf.model.URI; @@ -11,13 +14,13 @@ import org.openrdf.model.ValueFactory; import org.openrdf.query.BindingSet; import org.openrdf.query.QueryEvaluationException; + import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.model.BigdataValue; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.BigdataStatementIterator; -import com.bigdata.rdf.store.IRawTripleStore; import com.bigdata.striterator.ChunkedWrappedIterator; import com.bigdata.striterator.ICloseableIterator; @@ -35,6 +38,8 @@ private final ValueFactory vf; + private boolean open = true; + public BigdataConstructIterator( final AbstractTripleStore db, final 
CloseableIteration<? extends BindingSet, QueryEvaluationException> src, @@ -53,6 +58,17 @@ } public boolean hasNext() throws QueryEvaluationException { + + if(open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() throws QueryEvaluationException { if (stmtIt.hasNext()) { return true; } @@ -80,7 +96,13 @@ public void close() throws QueryEvaluationException { - stmtIt.close(); + if(open) { + + open = false; + + stmtIt.close(); + + } } @@ -89,12 +111,12 @@ * * @param bindingSet */ - private void addToLeftovers(BindingSet bindingSet) { + private void addToLeftovers(final BindingSet bindingSet) { - Resource subject = (Resource)bindingSet.getValue("subject"); - URI predicate = (URI)bindingSet.getValue("predicate"); - Value object = bindingSet.getValue("object"); - Resource context = (Resource)bindingSet.getValue("context"); + final Resource subject = (Resource)bindingSet.getValue("subject"); + final URI predicate = (URI)bindingSet.getValue("predicate"); + final Value object = bindingSet.getValue("object"); + final Resource context = (Resource)bindingSet.getValue("context"); if (context == null) { leftovers.add(vf.createStatement(subject, predicate, object)); } @@ -110,6 +132,8 @@ private SPO next; + private boolean open = true; + public SPOConverter( final CloseableIteration<? extends BindingSet, QueryEvaluationException> src) { @@ -118,10 +142,13 @@ } public void close() { - try { - src.close(); - } catch (QueryEvaluationException ex) { - throw new RuntimeException(ex); + if (open) { + open = false; + try { + src.close(); + } catch (QueryEvaluationException ex) { + throw new RuntimeException(ex); + } } } @@ -131,6 +158,17 @@ * need to handle that separately (without trying to resolve an SPO). 
*/ public boolean hasNext() { + + if (open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() { try { // we already have our next lined up if (next != null) { @@ -141,7 +179,7 @@ return false; } // pluck the next one out of the iterator - BindingSet bs = src.next(); + final BindingSet bs = src.next(); next = convert(bs); if (isValid(next)) { // if we can convert it we're good to go @@ -159,14 +197,17 @@ public SPO next() { // getting the next is actually handled in hasNext() - hasNext(); + if(!hasNext()) + throw new NoSuchElementException(); // clear out the next so we can get a new one - SPO spo = next; + final SPO spo = next; next = null; return spo; } public void remove() { + if(!open) + throw new IllegalStateException(); try { src.remove(); } catch (QueryEvaluationException ex) { @@ -178,7 +219,7 @@ * Make sure all of the three positions are non-null - i.e. the terms * actually exist in the lexicon (not always the case with construct). */ - private boolean isValid(SPO spo) { + private boolean isValid(final SPO spo) { return spo.s != null && spo.p != null && spo.o != null; @@ -188,10 +229,10 @@ * Convert a bindingset into an SPO. All values should already be * bigdata values, we dont' use db.getTermId(Value). 
*/ - private SPO convert(BindingSet bindingSet) { - Value subject = bindingSet.getValue("subject"); - Value predicate = bindingSet.getValue("predicate"); - Value object = bindingSet.getValue("object"); + private SPO convert(final BindingSet bindingSet) { + final Value subject = bindingSet.getValue("subject"); + final Value predicate = bindingSet.getValue("predicate"); + final Value object = bindingSet.getValue("object"); IV s = null; if (subject instanceof BigdataValue) { s = ((BigdataValue) subject).getIV(); @@ -204,7 +245,7 @@ if (object instanceof BigdataValue) { o = ((BigdataValue) object).getIV(); } - SPO spo = new SPO(s, p, o); + final SPO spo = new SPO(s, p, o); return spo; } Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -2611,14 +2611,25 @@ // return new CloseableIteration</*? 
extends*/ Resource, SailException>() { return new CloseableIteration<Resource, SailException>() { - Resource next = null; + private Resource next = null; + private boolean open = true; public void close() throws SailException { - next = null; - itr2.close(); + if (open) { + open = false; + next = null; + itr2.close(); + } } public boolean hasNext() throws SailException { + if(open && _hasNext()) + return true; + close(); + return false; + } + + private boolean _hasNext() throws SailException { if (next != null) return true; while (itr2.hasNext()) { Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/HitConvertor.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/HitConvertor.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/HitConvertor.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -45,7 +45,8 @@ private BigdataValue next; - @SuppressWarnings("unchecked") + private boolean open = true; + public HitConvertor(final AbstractTripleStore database, final Iterator<IHit> src, final Var svar, final BindingSet bindings) { @@ -126,12 +127,29 @@ public void close() throws QueryEvaluationException { - src.close(); + if (open) { + + open = false; + + src.close(); + + } } public boolean hasNext() throws QueryEvaluationException { + if (open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() throws QueryEvaluationException { + if (next != null) return true; Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/QueryEvaluationIterator.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/QueryEvaluationIterator.java 2011-08-04 14:03:00 UTC (rev 5020) +++ 
branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/QueryEvaluationIterator.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -1,5 +1,7 @@ package com.bigdata.rdf.sail; +import java.util.NoSuchElementException; + import info.aduna.iteration.CloseableIteration; import org.openrdf.query.QueryEvaluationException; @@ -17,6 +19,8 @@ private final CloseableIteration<? extends T, SailException> src; + private boolean open = true; + public QueryEvaluationIterator( CloseableIteration<? extends T, SailException> src) { @@ -28,6 +32,17 @@ public boolean hasNext() throws QueryEvaluationException { + if(open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() throws QueryEvaluationException { + try { return src.hasNext(); @@ -42,6 +57,9 @@ public T next() throws QueryEvaluationException { + if(!hasNext()) + throw new NoSuchElementException(); + try { return (T) src.next(); @@ -56,6 +74,9 @@ public void remove() throws QueryEvaluationException { + if (!open) + throw new IllegalStateException(); + try { src.remove(); @@ -70,16 +91,22 @@ public void close() throws QueryEvaluationException { - try { + if (open) { - src.close(); - - } catch(SailException ex) { - - throw new QueryEvaluationException(ex); - + open = false; + + try { + + src.close(); + + } catch (SailException ex) { + + throw new QueryEvaluationException(ex); + + } + } - + } } Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java 2011-08-04 14:03:00 UTC (rev 5020) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java 2011-08-04 15:44:21 UTC (rev 5021) @@ -31,6 +31,8 @@ */ private E current = null; + private boolean open = true; + public 
RunningQueryCloseableIteration(final IRunningQuery runningQuery, final CloseableIteration<E, X> src) { @@ -39,13 +41,27 @@ } - public void close() throws X { - runningQuery.cancel(true/* mayInterruptIfRunning */); - src.close(); - } + public void close() throws X { + if (open) { + open = false; + runningQuery.cancel(true/* mayInterruptIfRunning */); + src.close(); + } + } - public boolean hasNext() throws X { + public boolean hasNext() throws X { + if (open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() throws X { + if (current != null) { // Already buffered. return true; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-08-05 12:32:58
|
Revision: 5022 http://bigdata.svn.sourceforge.net/bigdata/?rev=5022&view=rev Author: thompsonbry Date: 2011-08-05 12:32:52 +0000 (Fri, 05 Aug 2011) Log Message: ----------- Lowered the log level for BigdataSail.addStatement() from INFO to DEBUG. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-08-04 15:44:21 UTC (rev 5021) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-08-05 12:32:52 UTC (rev 5022) @@ -2059,8 +2059,8 @@ public void addStatement(final Resource s, final URI p, final Value o, final Resource... contexts) throws SailException { - if (log.isInfoEnabled()) - log.info("s=" + s + ", p=" + p + ", o=" + o + ", contexts=" + if (log.isDebugEnabled()) + log.debug("s=" + s + ", p=" + p + ", o=" + o + ", contexts=" + Arrays.toString(contexts)); OpenRDFUtil.verifyContextNotNull(contexts); Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-08-04 15:44:21 UTC (rev 5021) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-08-05 12:32:52 UTC (rev 5022) @@ -2059,8 +2059,8 @@ public void addStatement(final Resource s, final URI p, final Value o, final Resource... 
contexts) throws SailException { - if (log.isInfoEnabled()) - log.info("s=" + s + ", p=" + p + ", o=" + o + ", contexts=" + if (log.isDebugEnabled()) + log.debug("s=" + s + ", p=" + p + ", o=" + o + ", contexts=" + Arrays.toString(contexts)); OpenRDFUtil.verifyContextNotNull(contexts); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-08-05 15:46:10
|
Revision: 5023 http://bigdata.svn.sourceforge.net/bigdata/?rev=5023&view=rev Author: thompsonbry Date: 2011-08-05 15:46:01 +0000 (Fri, 05 Aug 2011) Log Message: ----------- Modified AbstractRunningQuery to accept the original IChunkMessage which was submitted to the query. It will now release() that message, which causes close() on the underlying iterator pattern to be invoked. This fixes TestTicket361. Some more iterator/iteration implementations were identified with bad patterns in hasNext() such that they might not invoke close() in a timely manner. All tests likely to be affected by these changes are green. This change set is being committed to both the 1.0.0 maintenance branch and to the development branch. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/StandaloneChainedRunningQuery.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialAsynchronousIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/ThickAsynchronousIterator.java 
branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ICloseableIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/relation/accesspath/TestMultiSourceSequentialAsynchronousIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StandaloneChainedRunningQuery.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java 
branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialAsynchronousIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/ThickAsynchronousIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/striterator/ICloseableIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/test/com/bigdata/relation/accesspath/TestMultiSourceSequentialAsynchronousIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket361.java Removed Paths: ------------- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -165,6 +165,15 @@ */ final private IQueryClient clientProxy; + /** + * The original message which 
kicked off this query on the query controller. + * This is NOT required when the query is materialized on another node and + * MAY be <code>null</code>, but the original message used to kick off the + * query on the query controller MUST be provided so we can ensure that the + * source iteration is always closed when the query is cancelled. + */ + final private IChunkMessage<IBindingSet> realSource; + /** The query. */ final private PipelineOp query; @@ -452,6 +461,13 @@ * other than the query controller itself. * @param query * The query. + * @param realSource + * The original message which kicked off this query on the query + * controller. This is NOT required when the query is + * materialized on another node and MAY be <code>null</code>, but + * the original message used to kick off the query on the query + * controller MUST be provided so we can ensure that the source + * iteration is always closed when the query is cancelled. * * @throws IllegalArgumentException * if any argument is <code>null</code>. @@ -465,7 +481,8 @@ */ public AbstractRunningQuery(final QueryEngine queryEngine, final UUID queryId, final boolean controller, - final IQueryClient clientProxy, final PipelineOp query) { + final IQueryClient clientProxy, final PipelineOp query, + final IChunkMessage<IBindingSet> realSource) { if (queryEngine == null) throw new IllegalArgumentException(); @@ -489,6 +506,8 @@ this.query = query; + this.realSource = realSource; + this.bopIndex = BOpUtility.getIndex(query); /* @@ -1027,6 +1046,10 @@ /* * Do additional cleanup exactly once. */ + if (realSource != null) + realSource.release(); + // close() IAsynchronousIterators for accepted messages. + releaseAcceptedMessages(); // cancel any running operators for this query on this node. 
cancelled |= cancelRunningOperators(mayInterruptIfRunning); if (controller) { @@ -1088,6 +1111,23 @@ abstract protected boolean cancelRunningOperators( final boolean mayInterruptIfRunning); + /** + * Close the {@link IAsynchronousIterator} for any {@link IChunkMessage}s + * which have been <em>accepted</em> for this query on this node (internal + * API). + * <p> + * Note: This must be invoked while holding a lock which is exclusive with + * the lock used to hand off {@link IChunkMessage}s to operator tasks + * otherwise we could wind up invoking {@link IAsynchronousIterator#close()} + * on an {@link IAsynchronousIterator} running in a different thread. + * That would cause visibility problems in the close() semantics unless the + * {@link IAsynchronousIterator} is thread-safe for close (e.g., volatile + * write, synchronized, etc.). The appropriate lock for this is + * {@link AbstractRunningQuery#lock}. This method is only invoked out of + * {@link AbstractRunningQuery#cancel(boolean)} which owns that lock. 
+ */ + abstract protected void releaseAcceptedMessages(); + // { // boolean cancelled = false; // Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -178,9 +178,9 @@ */ public ChunkedRunningQuery(final QueryEngine queryEngine, final UUID queryId, final boolean controller, final IQueryClient clientProxy, - final PipelineOp query) { + final PipelineOp query, final IChunkMessage<IBindingSet> realSource) { - super(queryEngine, queryId, controller, clientProxy, query); + super(queryEngine, queryId, controller, clientProxy, query, realSource); this.operatorFutures = new ConcurrentHashMap<BSBundle, ConcurrentHashMap<ChunkFutureTask,ChunkFutureTask>>(); @@ -216,6 +216,7 @@ if (isDone()) { // The query is no longer running. + msg.release(); return false; //throw new RuntimeException(ERR_QUERY_DONE, future.getCause()); } @@ -261,6 +262,9 @@ } catch(InterruptedException ex) { // wrap interrupt thrown out of queue.put(msg); + + msg.release(); + throw new RuntimeException(ex); } finally { @@ -518,6 +522,11 @@ * for this operator. */ final List<IChunkMessage<IBindingSet>> accepted = new LinkedList<IChunkMessage<IBindingSet>>(); + try { + /* + * Note: Once we drain these messages from the work queue we are + * responsible for calling release() on them. + */ queue.drainTo(accepted, pipelined ? maxMessagesPerTask : Integer.MAX_VALUE); // #of messages accepted from the work queue. @@ -545,11 +554,15 @@ * task. 
*/ int nassigned = 1; + final Iterator<IChunkMessage<IBindingSet>> mitr = accepted.iterator(); final IMultiSourceAsynchronousIterator<IBindingSet[]> source = new MultiSourceSequentialAsynchronousIterator<IBindingSet[]>(// - accepted.remove(0).getChunkAccessor().iterator()// +// accepted.remove(0).getChunkAccessor().iterator()// + mitr.next().getChunkAccessor().iterator()// ); - for (IChunkMessage<IBindingSet> msg : accepted) { - source.add(msg.getChunkAccessor().iterator()); +// for (IChunkMessage<IBindingSet> msg : accepted) { +// source.add(msg.getChunkAccessor().iterator()); + while(mitr.hasNext()) { + source.add(mitr.next().getChunkAccessor().iterator()); nassigned++; } if (nassigned != naccepted) @@ -578,6 +591,17 @@ + naccepted+", runState="+runStateString()); getQueryEngine().execute(cft); return true; + } catch(Throwable t) { + try { + // Ensure messages are released(). + for (IChunkMessage<IBindingSet> msg : accepted) + msg.release(); + } catch (Throwable t2) { + log.error(t2, t2); + } + // wrap and rethrow cause. + throw new RuntimeException(t); + } } finally { lock.unlock(); } @@ -1111,6 +1135,17 @@ throw new Exception(t); } // otherwise ignore exception (normal completion). + } finally { + /* + * Ensure that the source is closed. + * + * TODO This is not being guarded by a lock so we might not + * safely publish the state change to the source iterator when + * it is closed. + */ + final IAsynchronousIterator<IBindingSet[]> src = context + .getSource(); + src.close(); } // Done. 
return null; @@ -1461,6 +1496,31 @@ } + @Override + protected void releaseAcceptedMessages() { + + for (Map.Entry<BSBundle, BlockingQueue<IChunkMessage<IBindingSet>>> e : operatorQueues + .entrySet()) { + + final BlockingQueue<IChunkMessage<IBindingSet>> queue = e.getValue(); + + if (queue.isEmpty()) + continue; + + final LinkedList<IChunkMessage<IBindingSet>> c = new LinkedList<IChunkMessage<IBindingSet>>(); + + queue.drainTo(c); + + for (IChunkMessage<IBindingSet> msg : c) { + + msg.release(); + + } + + } + + } + // @Override protected IChunkHandler getChunkHandler() { Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -44,7 +44,9 @@ void materialize(FederatedRunningQuery runningQuery); /** - * Discard the materialized data. + * Release all resources associated with this chunk. If the source has been + * opened, then ensure that it is closed. If the data has been materialized, + * then discard the materialized data. 
*/ void release(); Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -96,13 +96,18 @@ } public void release() { - // NOP + source.close(); } public IChunkAccessor<E> getChunkAccessor() { - return new ChunkAccessor(); + if (chunkAccessor == null) { + chunkAccessor = new ChunkAccessor(); + } + return chunkAccessor; } - + + private volatile transient ChunkAccessor chunkAccessor = null; + private class ChunkAccessor implements IChunkAccessor<E> { public IAsynchronousIterator<E[]> iterator() { Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -236,11 +236,11 @@ * <pre> * public MyRunningQuery(QueryEngine queryEngine, UUID queryId, * boolean controller, IQueryClient clientProxy, - * PipelineOp query) + * PipelineOp query, IChunkMessage<IBindingSet> realSource) * </pre> * * Note that classes derived from {@link QueryEngine} may override - * {@link QueryEngine#newRunningQuery(QueryEngine, UUID, boolean, IQueryClient, PipelineOp)} + * {@link QueryEngine#newRunningQuery(QueryEngine, UUID, boolean, IQueryClient, PipelineOp, IChunkMessage)} * in which case they might not support this option. 
*/ String RUNNING_QUERY_CLASS = (QueryEngine.class.getName() @@ -719,51 +719,58 @@ public void run() { if(log.isInfoEnabled()) log.info("Running: " + this); - while (true) { - try { - final AbstractRunningQuery q = queue.take(); - if (!q.isDone()) - q.consumeChunk(); - } catch (InterruptedException e) { - /* - * Note: Uncomment the stack trace here if you want to find - * where the query was interrupted. - * - * Note: If you want to find out who interrupted the query, - * then you can instrument BlockingBuffer#close() in - * PipelineOp#newBuffer(stats). - */ - if (log.isInfoEnabled()) - log.info("Interrupted." -// ,e - ); - return; - } catch (Throwable t) { - // log and continue - log.error(t, t); - continue; - } + try { + while (true) { + try { + final AbstractRunningQuery q = queue.take(); + if (!q.isDone()) + q.consumeChunk(); + } catch (InterruptedException e) { + /* + * Note: Uncomment the stack trace here if you want to + * find where the query was interrupted. + * + * Note: If you want to find out who interrupted the + * query, then you can instrument BlockingBuffer#close() + * in PipelineOp#newBuffer(stats). + */ + if (log.isInfoEnabled()) + log.info("Interrupted." + // ,e + ); + return; + } catch (Throwable t) { + // log and continue + log.error(t, t); + continue; + } + } // while(true) + } finally { + if (log.isInfoEnabled()) + log.info("QueryEngineTask is done."); } } } // QueryEngineTask - /** - * Add a chunk of intermediate results for consumption by some query. The - * chunk will be attached to the query and the query will be scheduled for - * execution. - * - * @param msg - * A chunk of intermediate results. - * - * @return <code>true</code> if the chunk was accepted. This will return - * <code>false</code> if the query is done (including cancelled) or - * the query engine is shutdown. - * - * @throws IllegalArgumentException - * if the chunk is <code>null</code>. - * @throws IllegalStateException - * if the chunk is not materialized. 
- */ + /** + * Add a chunk of intermediate results for consumption by some query. The + * chunk will be attached to the query and the query will be scheduled for + * execution. + * + * @param msg + * A chunk of intermediate results. + * + * @return <code>true</code> if the chunk was accepted. This will return + * <code>false</code> if the query is done (including cancelled) or + * the query engine is shutdown. The {@link IChunkMessage} will have + * been {@link IChunkMessage#release() released} if it was not + * accepted. + * + * @throws IllegalArgumentException + * if the chunk is <code>null</code>. + * @throws IllegalStateException + * if the chunk is not materialized. + */ protected boolean acceptChunk(final IChunkMessage<IBindingSet> msg) { if (msg == null) @@ -784,12 +791,14 @@ // add chunk to the query's input queue on this node. if (!q.acceptChunk(msg)) { // query is no longer running. + msg.release(); return false; } if(!isRunning()) { // query engine is no longer running. + msg.release(); return false; } @@ -829,12 +838,16 @@ // stop the query engine. final Future<?> f = engineFuture.get(); if (f != null) { + if(log.isInfoEnabled()) + log.info("Cancelling engineFuture: "+this); f.cancel(true/* mayInterruptIfRunning */); } // stop the service on which we ran the query engine. final ExecutorService s = engineService.get(); if (s != null) { + if(log.isInfoEnabled()) + log.info("Terminating engineService: "+this); s.shutdownNow(); } @@ -861,12 +874,17 @@ // stop the query engine. final Future<?> f = engineFuture.get(); - if (f != null) + if (f != null) { + if (log.isInfoEnabled()) + log.info("Cancelling engineFuture: " + this); f.cancel(true/* mayInterruptIfRunning */); - + } + // stop the service on which we ran the query engine. 
final ExecutorService s = engineService.get(); if (s != null) { + if (log.isInfoEnabled()) + log.info("Terminating engineService: "+this); s.shutdownNow(); } @@ -1097,7 +1115,7 @@ final AbstractRunningQuery runningQuery = newRunningQuery( /* this, */queryId, true/* controller */, - getProxy()/* queryController */, query); + getProxy()/* queryController */, query, msg/*realSource*/); final long timeout = query.getProperty(BOp.Annotations.TIMEOUT, BOp.Annotations.DEFAULT_TIMEOUT); @@ -1162,7 +1180,7 @@ // tell query to consume the initial chunk. acceptChunk(msg); - + return runningQuery; } @@ -1401,7 +1419,7 @@ protected AbstractRunningQuery newRunningQuery( /*final QueryEngine queryEngine,*/ final UUID queryId, final boolean controller, final IQueryClient clientProxy, - final PipelineOp query) { + final PipelineOp query, final IChunkMessage<IBindingSet> realSource) { final String className = query.getProperty( Annotations.RUNNING_QUERY_CLASS, @@ -1426,11 +1444,11 @@ final Constructor<? extends IRunningQuery> ctor = cls .getConstructor(new Class[] { QueryEngine.class, UUID.class, Boolean.TYPE, IQueryClient.class, - PipelineOp.class }); + PipelineOp.class, IChunkMessage.class }); // save reference. 
runningQuery = ctor.newInstance(new Object[] { this, queryId, - controller, clientProxy, query }); + controller, clientProxy, query, realSource }); } catch (Exception ex) { Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/StandaloneChainedRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/StandaloneChainedRunningQuery.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/engine/StandaloneChainedRunningQuery.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -134,9 +134,10 @@ */ public StandaloneChainedRunningQuery(final QueryEngine queryEngine, final UUID queryId, final boolean controller, - final IQueryClient clientProxy, final PipelineOp query) { + final IQueryClient clientProxy, final PipelineOp query, + final IChunkMessage<IBindingSet> realSource) { - super(queryEngine, queryId, controller, clientProxy, query); + super(queryEngine, queryId, controller, clientProxy, query, realSource); this.operatorQueues = new ConcurrentHashMap<Integer/* bopId */, MultiplexBlockingBuffer<IBindingSet[]>>(); @@ -410,6 +411,25 @@ } + /* + * TODO Review this. I made the change at a time when we were not using the + * StandaloneRunningQueryClass. It is responsible for invoking + * message.release(), which it is not really doing. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 + */ + @Override + protected void releaseAcceptedMessages() { + + for (MultiplexBlockingBuffer<IBindingSet[]> buffer : operatorQueues + .values()) { + + buffer.flushAndCloseAll(); + + } + + } + /** * Handles various handshaking with the {@link AbstractRunningQuery} and the * {@link RunState} for the query. 
Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -431,7 +431,7 @@ final FederatedRunningQuery q = newRunningQuery( /*FederatedQueryEngine.this,*/ queryId, false/* controller */, - msg.getQueryController(), query); + msg.getQueryController(), query, msg); return (FederatedRunningQuery) putIfAbsent(queryId, q); @@ -445,7 +445,7 @@ final FederatedRunningQuery q = newRunningQuery(/*this, */queryId, false/* controller */, queryDecl.getQueryController(), - queryDecl.getQuery()); + queryDecl.getQuery(), null/*realSource*/); putIfAbsent(queryId, q); @@ -522,10 +522,10 @@ protected FederatedRunningQuery newRunningQuery( /*final QueryEngine queryEngine,*/ final UUID queryId, final boolean controller, final IQueryClient clientProxy, - final PipelineOp query) { + final PipelineOp query, final IChunkMessage<IBindingSet> realSource) { return new FederatedRunningQuery(this/*queryEngine*/, queryId, controller, - clientProxy, query); + clientProxy, query, realSource); } @@ -565,6 +565,18 @@ throw new RuntimeException(e); } + if (proxy == null) { + + /* + * Note: Presumably this is due to the concurrent tear down of + * the peer. 
+ */ + + throw new RuntimeException("No query engine on service: " + + serviceUUID); + + } + IQueryPeer tmp = proxyMap.putIfAbsent(serviceUUID, proxy); if (tmp != null) { Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -240,9 +240,10 @@ */ public FederatedRunningQuery(final FederatedQueryEngine queryEngine, final UUID queryId, final boolean controller, - final IQueryClient clientProxy, final PipelineOp query) { + final IQueryClient clientProxy, final PipelineOp query, + final IChunkMessage<IBindingSet> realSource) { - super(queryEngine, queryId, /*begin, */controller, clientProxy, query); + super(queryEngine, queryId, /*begin, */controller, clientProxy, query, realSource); /* * Note: getServiceUUID() should be a smart proxy method and thus not Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -341,6 +341,12 @@ */ public void release() { + if (chunkAccessor != null) { + + chunkAccessor.close(); + + } + final List<IAllocation> tmp = materialized; if (tmp != null) { @@ -423,9 +429,17 @@ public IChunkAccessor<E> getChunkAccessor() { - return new ChunkAccessor(); - + if (chunkAccessor == null) { + + chunkAccessor = new ChunkAccessor(); + + } + + return chunkAccessor; + } + + private volatile transient 
ChunkAccessor chunkAccessor = null; /** * FIXME Provide in place decompression and read out of the binding sets. @@ -437,23 +451,38 @@ */ private class ChunkAccessor implements IChunkAccessor<E> { - public IAsynchronousIterator<E[]> iterator() { - + private final IAsynchronousIterator<E[]> source; + + public ChunkAccessor() { + final List<IAllocation> tmp = materialized; if (tmp == null) throw new UnsupportedOperationException(); - return new DeserializationIterator(materialized.iterator()); + source = new DeserializationIterator(materialized.iterator()); } + + public IAsynchronousIterator<E[]> iterator() { + + return source; + + } + + public void close() { + source.close(); + + } + } private class DeserializationIterator implements IAsynchronousIterator<E[]> { private final Iterator<IAllocation> src; - + private volatile boolean open = true; + public DeserializationIterator(final Iterator<IAllocation> src) { this.src = src; @@ -461,13 +490,26 @@ } public void close() { + + if(open) { + + open = false; + + // TODO Anything to discard? 
+ + } } public boolean hasNext() { - return src.hasNext(); + if(open && src.hasNext()) + return true; + close(); + + return false; + } @SuppressWarnings("unchecked") Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -183,9 +183,12 @@ } public void release() { - // NOP + if (chunkAccessor != null) + chunkAccessor.close(); } + private transient volatile ChunkAccessor chunkAccessor = null; + public IChunkAccessor<E> getChunkAccessor() { return new ChunkAccessor(); @@ -202,11 +205,19 @@ */ private class ChunkAccessor implements IChunkAccessor<E> { + private final IAsynchronousIterator<E[]> source; + + public ChunkAccessor() { + source = new DeserializationIterator(); + } + public IAsynchronousIterator<E[]> iterator() { - - return new DeserializationIterator(); - + return source; } + + public void close() { + source.close(); + } } @@ -214,7 +225,7 @@ private volatile ObjectInputStream ois; private E[] current = null; - + public DeserializationIterator() { try { @@ -234,9 +245,20 @@ } - @SuppressWarnings("unchecked") public boolean hasNext() { + if (ois != null && _hasNext()) + return true; + + close(); + + return false; + + } + + @SuppressWarnings("unchecked") + private boolean _hasNext() { + if (current != null) return true; Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialAsynchronousIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialAsynchronousIterator.java 2011-08-05 12:32:52 UTC (rev 5022) +++ 
branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialAsynchronousIterator.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -33,6 +33,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; +import org.apache.log4j.Logger; /** * Class allows new sources to be attached dynamically. If the existing sources @@ -45,6 +46,8 @@ public class MultiSourceSequentialAsynchronousIterator<E> implements IMultiSourceAsynchronousIterator<E> { + private final static Logger log = Logger.getLogger(MultiSourceSequentialAsynchronousIterator.class); + private final ReentrantLock lock = new ReentrantLock(); private final Queue<IAsynchronousIterator<E>> sources = new LinkedBlockingQueue<IAsynchronousIterator<E>>(); @@ -62,7 +65,7 @@ * testing that variable. */ private volatile IAsynchronousIterator<E> current; - + public MultiSourceSequentialAsynchronousIterator(final IAsynchronousIterator<E> src) { current = src; } @@ -70,7 +73,24 @@ public void close() { lock.lock(); try { - current = null; + /* + * Ensure that all sources are eventually closed. + */ + // close the current source (if any). + final IAsynchronousIterator<E> current = this.current; + this.current = null; + if (current != null) { + if (log.isInfoEnabled()) + log.info("Closing source: " + current); + current.close(); + } + // Close any sources still in the queue. + for(IAsynchronousIterator<E> t : sources) { + if (log.isInfoEnabled()) + log.info("Closing source: " + t); + t.close(); + } + // Clear the queue. sources.clear(); } finally { lock.unlock(); @@ -110,10 +130,20 @@ // current is known to be [null]. lock.lock(); try { + /* Close iterator which has been consumed. 
+ * + */ + if (log.isInfoEnabled()) + log.info("Closing source: " + current); + current.close(); // remove the head of the queue (non-blocking) while ((current = sources.poll()) != null) { - if (!current.isExhausted()) + if (!current.isExhausted()) { return current; + } else { + // Note: should already be closed since exhausted. + current.close(); + } } // no more sources with data, close while holding lock. close(); @@ -155,6 +185,14 @@ throw new UnsupportedOperationException(); } + /** + * {@inheritDoc} + * <p> + * Note: This will report <code>true</code> iff all iterators currently + * attached have been consumed, at which point {@link #current} becomes + * <code>null</code> and no more iterators may be attached, hence the high + * level iterator is provably exhausted. + */ public boolean isExhausted() { return nextSource() == null; } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/ThickAsynchronousIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/ThickAsynchronousIterator.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/ThickAsynchronousIterator.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -91,7 +91,12 @@ public boolean hasNext() { - return open && lastIndex + 1 < a.length; + if(open && lastIndex + 1 < a.length) + return true; + + close(); + + return false; } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java 2011-08-05 15:46:01 UTC 
(rev 5023) @@ -51,7 +51,7 @@ // private static final Logger log = Logger.getLogger(WrappedAsynchronousIterator.class); - private transient boolean open = true; + private transient volatile boolean open = true; private final IChunkedIterator<F> src; @@ -121,13 +121,8 @@ open = false; -// if (src instanceof ICloseableIterator<?>) { -// if (log.isDebugEnabled()) -// log.debug("Close", new RuntimeException()); - ((ICloseableIterator<?>) src).close(); + src.close(); -// } - } } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -78,6 +78,8 @@ */ private F[] chunk = null; + private volatile boolean open = true; + // /** // * Total elapsed time for the iterator instance. // */ @@ -248,7 +250,7 @@ if (log.isInfoEnabled()) log.info("Finished: nchunks=" + nchunks + ", nelements=" + nelements + ", elapsed=" + elapsed - + "ms, sink.open" + buffer.isOpen()); + + "ms, sink.open=" + buffer.isOpen()); return nelements; @@ -256,8 +258,20 @@ try { src.close(); - } finally { - buffer.close(); + } finally { + /* + * Note: Close the buffer since nothing more will be written + * on it, but DO NOT close the iterator draining the buffer + * (aka [resolvedItr]) since the consumer will use that to + * drain the buffer. + * + * Note: Failure to close the buffer here will cause a + * severe performance penalty. + * + * Note: Closing the [resolvedItr] here will cause data to + * be lost. 
+ */ + buffer.close(); } } @@ -286,13 +300,24 @@ */ public boolean hasNext() { + if(open && _hasNext()) + return true; + + close(); + + return false; + + } + + private boolean _hasNext() { + if (resolvedItr == null) { throw new IllegalStateException(); } - if (lastIndex != -1 && chunk!=null && lastIndex + 1 < chunk.length) { + if (lastIndex != -1 && chunk != null && lastIndex + 1 < chunk.length) { return true; @@ -310,7 +335,7 @@ if (!hasNext()) throw new NoSuchElementException(); - if (lastIndex == -1 || chunk!=null && lastIndex + 1 == chunk.length) { + if (lastIndex == -1 || chunk != null && lastIndex + 1 == chunk.length) { // get the next chunk of resolved BigdataStatements. chunk = resolvedItr.next(); @@ -349,28 +374,38 @@ public void close() { - if (log.isInfoEnabled()) - log.info("lastIndex=" + lastIndex + ", chunkSize=" - + (chunk != null ? "" + chunk.length : "N/A")); + if (open) { - /* - * Explicitly close the source since we will not be reading anything - * more from it. - * - * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 - */ - src.close(); + open = false; - /* - * Close the sink as well. The thread draining the sink's iterator will - * notice that it has been closed. It will still drain anything already - * buffered in the sink, but then the iterator() will report that no - * more data is available rather than blocking. - */ - buffer.close(); + if (log.isInfoEnabled()) + log.info("lastIndex=" + lastIndex + ", chunkSize=" + + (chunk != null ? "" + chunk.length : "N/A")); - chunk = null; - + /* + * Explicitly close the source since we will not be reading anything + * more from it. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/361 + */ + src.close(); + + /* + * Close the sink since nothing more will be written on it. + */ + buffer.close(); + + /* + * Since the outer iterator is being closed, nothing more will be + * read from the buffer so we also close the iterator draining the + * buffer. 
+ */ + resolvedItr.close(); + + chunk = null; + + } + } } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -47,7 +47,7 @@ private static transient final Logger log = Logger.getLogger(ChunkedWrappedIterator.class); - private boolean open = true; + private volatile boolean open = true; private final Class<? extends E> elementClass; @@ -168,19 +168,20 @@ public void close() { - if (!open) - return; - - open = false; + if (open) { - if(realSource instanceof ICloseableIterator) { - - ((ICloseableIterator<E>)realSource).close(); - + open = false; + + if (realSource instanceof ICloseableIterator) { + + ((ICloseableIterator<E>) realSource).close(); + + } + + if (log.isInfoEnabled()) + log.info("#chunks=" + nchunks + ", #elements=" + nelements); + } - - if(log.isInfoEnabled()) - log.info("#chunks="+nchunks+", #elements="+nelements); } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ICloseableIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ICloseableIterator.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/striterator/ICloseableIterator.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -42,7 +42,10 @@ /** * Closes the iterator, releasing any associated resources. This method MAY - * be invoked safely if the iterator is already closed. + * be invoked safely if the iterator is already closed. 
Implementations of + * this interface MUST invoke {@link #close()} if {@link Iterator#hasNext()} + * method returns <code>false</code> to ensure that the iterator is closed + * (and its resources release) as soon as it is exhausted. * <p> * Note: Implementations that support {@link Iterator#remove()} MUST NOT * eagerly close the iterator when it is exhausted since that would make it Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/relation/accesspath/TestMultiSourceSequentialAsynchronousIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/relation/accesspath/TestMultiSourceSequentialAsynchronousIterator.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/relation/accesspath/TestMultiSourceSequentialAsynchronousIterator.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -27,11 +27,15 @@ package com.bigdata.relation.accesspath; +import java.io.Serializable; +import java.util.NoSuchElementException; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; -import com.bigdata.relation.accesspath.IAsynchronousIterator; -import com.bigdata.relation.accesspath.ThickAsynchronousIterator; - import junit.framework.TestCase2; /** @@ -50,10 +54,14 @@ super(name); } - private final IAsynchronousIterator<String> emptyIterator() { + private final ThickAsynchronousIterator<String> emptyIterator() { return new ThickAsynchronousIterator<String>(new String[]{}); } + private final ThickAsynchronousIterator<String> iterator(final String... a) { + return new ThickAsynchronousIterator<String>(a); + } + public void test1() throws InterruptedException { // empty iterator. 
@@ -135,37 +143,243 @@ * Verify that the iterator notices if it is asynchronously closed. * * @throws InterruptedException + * @throws ExecutionException */ - public void test3() throws InterruptedException { + public void test3() throws InterruptedException, ExecutionException { // empty iterator. final MultiSourceSequentialAsynchronousIterator<String> itr = new MultiSourceSequentialAsynchronousIterator<String>( emptyIterator()); - new Thread() { + final ExecutorService service = Executors.newSingleThreadExecutor(); - public void run() { - try { + try { + + final FutureTask<Void> ft = new FutureTask<Void>(new Callable<Void>() { + + public Void call() throws Exception { + log.info("Will wait on iterator."); - if (itr.hasNext(2000, TimeUnit.MILLISECONDS)) + + if (itr.hasNext(1000, TimeUnit.MILLISECONDS)) fail("Iterator should not visit anything."); - } catch (Throwable t) { - log.error(t, t); + + // Can not add more sources. + assertFalse(itr.add(new ThickAsynchronousIterator<String>( + new String[] { "b" }))); + + return null; + } - } + + }); + + service.submit(ft); - }.start(); + Thread.sleep(500/*ms*/); + + log.info("Will close iterator."); + itr.close(); - log.info("Sleeping..."); - Thread.sleep(500/*milliseconds.*/); + // check future. + ft.get(); + } finally { + + service.shutdownNow(); + + } + + } + + /** + * Verify that the iterator closes all sources iterators when it is closed. + * + * @throws InterruptedException + */ + public void test4_sources_closed() throws InterruptedException { + + final ThickAsynchronousIterator<String> itr1 = iterator("a","b","c"); + + // empty iterator. + final MultiSourceSequentialAsynchronousIterator<String> itr = new MultiSourceSequentialAsynchronousIterator<String>( + itr1); + + assertEquals("a", itr.next()); +// assertEquals("b", itr.next()); + + // more is available from the high level iterator. + assertTrue(itr.hasNext()); + + // more is available from the underlying iterator. 
+ assertTrue(itr1.hasNext()); + log.info("Will close iterator."); itr.close(); // can not add more sources. - assertFalse(itr.add(new ThickAsynchronousIterator<String>( - new String[] { "b" }))); + assertFalse(itr.add(iterator("d"))); + // underlying iterator was closed. + assertFalse(itr1.open); + assertFalse(itr1.hasNext()); + + // high level iterator was closed. + assertFalse(itr.hasNext()); + } - + + /** + * Verify that sources are closed when there is more than one source. + * + * @throws InterruptedException + */ + public void test5_sources_closed() throws InterruptedException { + + final ThickAsynchronousIterator<String> itr1 = iterator("a","b","c"); + final ThickAsynchronousIterator<String> itr2 = iterator("d","e","f"); + final ThickAsynchronousIterator<String> itr3 = iterator("g","h","i"); + + // empty iterator. + final MultiSourceSequentialAsynchronousIterator<String> itr = new MultiSourceSequentialAsynchronousIterator<String>( + itr1); + itr.add(itr2); + itr.add(itr3); + + assertEquals("a", itr.next()); + assertEquals("b", itr.next()); + assertEquals("c", itr.next()); + + // more is available from the high level iterator. + assertTrue(itr.hasNext()); + + // 1st underlying iterator was closed. + assertFalse(itr1.hasNext()); + + log.info("Will close iterator."); + itr.close(); + + // can not add more sources. + assertFalse(itr.add(iterator("xxx"))); + + // remaining underlying iterators were closed. + assertFalse(itr1.open); + assertFalse(itr1.hasNext()); + assertFalse(itr2.open); + assertFalse(itr2.hasNext()); + assertFalse(itr3.open); + assertFalse(itr3.hasNext()); + + // high level iterator was closed. 
+ assertFalse(itr.hasNext()); + + } + + private static class ThickAsynchronousIterator<E> implements + IAsynchronousIterator<E>, Serializable { + + private static final long serialVersionUID = 1L; + + private transient boolean open = true; + + /** + * Index of the last element visited by {@link #next()} and + * <code>-1</code> if NO elements have been visited. + */ + private int lastIndex; + + /** + * The array of elements to be visited by the iterator. + */ + private final E[] a; + + /** + * Create a thick iterator. + * + * @param a + * The array of elements to be visited by the iterator (may + * be empty, but may not be <code>null</code>). + * + * @throws IllegalArgumentException + * if <i>a</i> is <code>null</code>. + */ + public ThickAsynchronousIterator(final E[] a) { + + if (a == null) + throw new IllegalArgumentException(); + + this.a = a; + + lastIndex = -1; + + } + + public boolean hasNext() { + + if(open && lastIndex + 1 < a.length) + return true; + + close(); + + return false; + + } + + public E next() { + + if (!hasNext()) + throw new NoSuchElementException(); + + return a[++lastIndex]; + + } + + public void remove() { + + throw new UnsupportedOperationException(); + + } + + /* + * ICloseableIterator. + */ + + public void close() { + + open = false; + + } + + /* + * IAsynchronousIterator. + */ + + public boolean isExhausted() { + + return !hasNext(); + + } + + /** + * Delegates to {@link #hasNext()} since all data are local and timeouts + * can not occur. + */ + public boolean hasNext(long timeout, TimeUnit unit) { + + return hasNext(); + + } + + /** + * Delegates to {@link #next()} since all data are local and timeouts + * can not occur. 
+ */ + public E next(long timeout, TimeUnit unit) { + + return next(); + + } + + } + } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -143,7 +143,6 @@ import com.bigdata.rdf.store.BD; import com.bigdata.rdf.store.BigdataBindingSetResolverator; import com.bigdata.rdf.store.BigdataOpenRDFBindingSetsResolverator; -import com.bigdata.relation.accesspath.AccessPath; import com.bigdata.relation.accesspath.ElementFilter; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBuffer; @@ -162,7 +161,6 @@ import com.bigdata.striterator.ChunkedWrappedIterator; import com.bigdata.striterator.Dechunkerator; import com.bigdata.striterator.DistinctFilter; -import com.bigdata.striterator.IChunkedIterator; import com.bigdata.striterator.IChunkedOrderedIterator; import com.bigdata.striterator.ICloseableIterator; @@ -1103,6 +1101,8 @@ // ensure query is halted. runningQuery.cancel(true/* mayInterruptIfRunning */); } + // ensure source is closed on error path. + source.close(); /* * Note: Do not wrap as a different exception type. The caller is * looking for this. @@ -1113,6 +1113,8 @@ // ensure query is halted. runningQuery.cancel(true/* mayInterruptIfRunning */); } + // ensure source is closed on error path. 
+ source.close(); throw new QueryEvaluationException(t); } Deleted: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java 2011-08-05 12:32:52 UTC (rev 5022) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIteration.java 2011-08-05 15:46:01 UTC (rev 5023) @@ -1,129 +0,0 @@ -package com.bigdata.rdf.sail; - -import info.aduna.iteration.CloseableIteration; - -import java.util.NoSuchElementException; - -import org.openrdf.query.BindingSet; -import org.openrdf.query.QueryEvaluationException; - -import com.bigdata.bop.engine.IRunningQuery; - -/** - * Iteration construct wraps an {@link IRunningQuery} with logic to (a) verify - * that the {@link IRunningQuery} has not encountered an error; and (b) to cancel - * the {@link IRunningQuery} when the iteration is {@link #close() closed}. - * @author thompsonbry - * - * @param <E> - * @param <X> - */ -public class RunningQueryCloseableIteration<E extends BindingSet, X extends QueryEvaluationException> - implements CloseableIteration<E, X> { - - private final IRunningQuery runningQuery; - private final CloseableIteration<E, X> src; - private boolean checkedFuture = false; - /** - * The next element is buffered so we can always return it if the - * {@link #runningQuery} was not aborted at the time that {@link #hasNext()} - * return <code>true</code>. 
- */ - private E current = null; - - private boolean open = true; - - public RunningQueryCloseableIteration(final IRunningQuery runningQuery, - final CloseableIteration<E, X> src) { - - this.runningQuery = runningQuery; - this.src = src; - - } - - public void close() throws X { - if (open) { - open = false; - runningQuery.cancel(true/* mayInterruptIfRunning */); - src.close(); - } - } - - public boolean hasNext() throws X { - - if (open && _hasNext()) - return true; - - close(); - - return false; - - } - - private boolean _hasNext() throws X { - - if (current != null) { - // Already buffered. - return true; - } - - if (!src.hasNext()) { - // Source is exhausted. - return false; - } - - // buffer the next element. - current = src.next(); - - // test for abnormal completion of the runningQuery. - if (!checkedFuture && runningQuery.isDone()) { - try { - runningQuery.get(); - } catch (InterruptedException e) { - /* - * Interrupted while waiting on the Future (should not happen - * since the Future is already done). - */ - throw (X) new QueryEvaluationException(e); - } catch (Throwable e) { - /* - * Exception thrown by the runningQuery. - */ - if (runningQu... [truncated message content] |
From: <tho...@us...> - 2011-08-24 13:20:34
|
Revision: 5081 http://bigdata.svn.sourceforge.net/bigdata/?rev=5081&view=rev Author: thompsonbry Date: 2011-08-24 13:20:28 +0000 (Wed, 24 Aug 2011) Log Message: ----------- Moved a misplaced closing bracket per [1]. [1] https://sourceforge.net/apps/trac/bigdata/ticket/371 (DataLoader fails to load resources available from the classpath.) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java branches/TERMS_REFACTOR_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2011-08-24 13:04:59 UTC (rev 5080) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2011-08-24 13:20:28 UTC (rev 5081) @@ -749,26 +749,28 @@ // Next try searching for the desired resource from the root // of the jar; that is, search the jar file for an exact match // of the input string. - rdfStream = - getClass().getClassLoader().getResourceAsStream(resource); + rdfStream = getClass().getClassLoader().getResourceAsStream( + resource); if (rdfStream == null) { - /* - * If we do not find as a Resource then try the file system. - */ - - final File file = new File(resource); - - if(file.exists()) { - - loadFiles(totals, 0/* depth */, file, baseURL, - rdfFormat, null, filter, endOfBatch); + /* + * If we do not find as a Resource then try the file system. 
+ */ - return; - + final File file = new File(resource); + + if (file.exists()) { + + loadFiles(totals, 0/* depth */, file, baseURL, rdfFormat, + null, filter, endOfBatch); + + return; + + } + } - + } /* @@ -803,8 +805,6 @@ } - } - } /** Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2011-08-24 13:04:59 UTC (rev 5080) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2011-08-24 13:20:28 UTC (rev 5081) @@ -753,28 +753,30 @@ // Next try searching for the desired resource from the root // of the jar; that is, search the jar file for an exact match // of the input string. - rdfStream = - getClass().getClassLoader().getResourceAsStream(resource); + rdfStream = getClass().getClassLoader().getResourceAsStream( + resource); if (rdfStream == null) { - /* - * If we do not find as a Resource then try the file system. - */ - - final File file = new File(resource); - - if(file.exists()) { - - loadFiles(totals, 0/* depth */, file, baseURL, - rdfFormat, null, filter, endOfBatch); + /* + * If we do not find as a Resource then try the file system. + */ - return; - + final File file = new File(resource); + + if (file.exists()) { + + loadFiles(totals, 0/* depth */, file, baseURL, rdfFormat, + null, filter, endOfBatch); + + return; + + } + } - + } - + /* * Obtain a buffered reader on the input stream. */ @@ -807,8 +809,6 @@ } - } - } /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-09-05 10:53:48
|
Revision: 5129 http://bigdata.svn.sourceforge.net/bigdata/?rev=5129&view=rev Author: thompsonbry Date: 2011-09-05 10:53:41 +0000 (Mon, 05 Sep 2011) Log Message: ----------- Found a problem in RunningQueryCloseableIterator where it was not checking the Future of the IRunningQuery in close(). See https://sourceforge.net/apps/trac/bigdata/ticket/361 (Code review of openrdf CloseableIterations and layering of IRunningQuery.iterator()) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java 2011-09-04 11:24:05 UTC (rev 5128) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java 2011-09-05 10:53:41 UTC (rev 5129) @@ -17,6 +17,7 @@ private final IRunningQuery runningQuery; private final ICloseableIterator<E> src; private boolean checkedFuture = false; + /** * The next element is buffered so we can always return it if the * {@link #runningQuery} was not aborted at the time that {@link #hasNext()} @@ -34,11 +35,41 @@ } + /** + * Test for abnormal completion of the {@link IRunningQuery}. + */ + private void checkFuture() { + +// if (!checkedFuture && runningQuery.isDone()) { + try { + runningQuery.get(); + } catch (InterruptedException e) { + /* + * Interrupted while waiting on the Future (should not happen + * since the Future is already done). + */ + throw new RuntimeException(e); + } catch (Throwable e) { + /* + * Exception thrown by the runningQuery. 
+ */ + if (runningQuery.getCause() != null) { + // abnormal termination - wrap and rethrow. + throw new RuntimeException(e); + } + // otherwise this is normal termination. + } + checkedFuture = true; +// } + + } + public void close() { if (open) { open = false; runningQuery.cancel(true/* mayInterruptIfRunning */); src.close(); + checkFuture(); } } @@ -68,34 +99,17 @@ // buffer the next element. current = src.next(); - // test for abnormal completion of the runningQuery. - if (!checkedFuture && runningQuery.isDone()) { - try { - runningQuery.get(); - } catch (InterruptedException e) { - /* - * Interrupted while waiting on the Future (should not happen - * since the Future is already done). - */ - throw new RuntimeException(e); - } catch (Throwable e) { - /* - * Exception thrown by the runningQuery. - */ - if (runningQuery.getCause() != null) { - // abnormal termination - wrap and rethrow. - throw new RuntimeException(e); - } - // otherwise this is normal termination. - } - checkedFuture = true; - } + if (!checkedFuture && runningQuery.isDone()) { + checkFuture(); + + } + // the next element is now buffered. 
return true; } - + public E next() { if (!hasNext()) Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java 2011-09-04 11:24:05 UTC (rev 5128) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/RunningQueryCloseableIterator.java 2011-09-05 10:53:41 UTC (rev 5129) @@ -17,6 +17,7 @@ private final IRunningQuery runningQuery; private final ICloseableIterator<E> src; private boolean checkedFuture = false; + /** * The next element is buffered so we can always return it if the * {@link #runningQuery} was not aborted at the time that {@link #hasNext()} @@ -34,11 +35,41 @@ } + /** + * Test for abnormal completion of the {@link IRunningQuery}. + */ + private void checkFuture() { + +// if (!checkedFuture && runningQuery.isDone()) { + try { + runningQuery.get(); + } catch (InterruptedException e) { + /* + * Interrupted while waiting on the Future (should not happen + * since the Future is already done). + */ + throw new RuntimeException(e); + } catch (Throwable e) { + /* + * Exception thrown by the runningQuery. + */ + if (runningQuery.getCause() != null) { + // abnormal termination - wrap and rethrow. + throw new RuntimeException(e); + } + // otherwise this is normal termination. + } + checkedFuture = true; +// } + + } + public void close() { if (open) { open = false; runningQuery.cancel(true/* mayInterruptIfRunning */); src.close(); + checkFuture(); } } @@ -68,34 +99,17 @@ // buffer the next element. current = src.next(); - // test for abnormal completion of the runningQuery. if (!checkedFuture && runningQuery.isDone()) { - try { - runningQuery.get(); - } catch (InterruptedException e) { - /* - * Interrupted while waiting on the Future (should not happen - * since the Future is already done). 
- */ - throw new RuntimeException(e); - } catch (Throwable e) { - /* - * Exception thrown by the runningQuery. - */ - if (runningQuery.getCause() != null) { - // abnormal termination - wrap and rethrow. - throw new RuntimeException(e); - } - // otherwise this is normal termination. - } - checkedFuture = true; + + checkFuture(); + } - + // the next element is now buffered. return true; } - + public E next() { if (!hasNext()) This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-09-14 15:46:20
|
Revision: 5186 http://bigdata.svn.sourceforge.net/bigdata/?rev=5186&view=rev Author: thompsonbry Date: 2011-09-14 15:46:09 +0000 (Wed, 14 Sep 2011) Log Message: ----------- Modified the AbstractTransactionService to include the commitTime against which the transaction is reading in the TxState. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java 2011-09-14 14:27:58 UTC (rev 5185) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java 2011-09-14 15:46:09 UTC (rev 5186) @@ -677,10 +677,13 @@ try { - final long tx = assignTransactionIdentifier(timestamp); + final AtomicLong readCommitTime = new AtomicLong(); - activateTx(new TxState(tx)); + final long tx = assignTransactionIdentifier(timestamp, + readCommitTime); + activateTx(new TxState(tx, readCommitTime.get())); + return tx; } catch(TimeoutException ex) { @@ -1212,7 +1215,10 @@ * * @param timestamp * The timestamp. - * + * @param readCommitTime + * The commit point against which the transaction will read. This + * is set as a side-effect on the caller's argument. + * * @return The assigned transaction identifier. * * @throws InterruptedException @@ -1222,9 +1228,12 @@ * if a timeout occurs while awaiting a start time which would * satisfy the request. 
*/ - protected long assignTransactionIdentifier(final long timestamp) + final protected long assignTransactionIdentifier(final long timestamp, + final AtomicLong readCommitTime) throws InterruptedException, TimeoutException { + final long lastCommitTime = getLastCommitTime(); + if (timestamp == ITx.UNISOLATED) { /* @@ -1240,12 +1249,13 @@ * the moment when we assigned this transaction identifier. */ + // The transaction will read from the most recent commit point. + readCommitTime.set(lastCommitTime); + return -nextTimestamp(); } - final long lastCommitTime = getLastCommitTime(); - // if (timestamp > lastTimestamp) { // // /* @@ -1268,6 +1278,9 @@ * READ_COMMITTED. */ + // The transaction will read from the most recent commit point. + readCommitTime.set(lastCommitTime); + return nextTimestamp(); } @@ -1285,6 +1298,9 @@ * timestamp. */ + // The transaction will read from the most recent commit point. + readCommitTime.set(lastCommitTime); + return nextTimestamp(); } @@ -1307,7 +1323,7 @@ } - return getStartTime(timestamp); + return getStartTime(timestamp, readCommitTime); } @@ -1321,11 +1337,15 @@ * * @param timestamp * The timestamp (identifies the desired commit point). + * @param readCommitTime + * The commit point against which the transaction will read. This + * is set as a side-effect on the caller's argument. * * @return A distinct timestamp not in use by any transaction that will read * from the same commit point. */ - protected long getStartTime(final long timestamp) + final protected long getStartTime(final long timestamp, + final AtomicLong readCommitTime) throws InterruptedException, TimeoutException { /* @@ -1334,6 +1354,9 @@ */ final long commitTime = findCommitTime(timestamp); + // The transaction will read from this commit point (-1 iff no commits yet). 
+ readCommitTime.set(commitTime); + if (commitTime == -1L) { /* @@ -1853,6 +1876,14 @@ public final long tx; /** + * The commit time associated with the commit point against which this + * transaction will read. This will be <code>-1</code> IFF there are no + * commit points yet. Otherwise it is a real commit time associated with + * some existing commit point. + */ + public final long readCommitTime; + + /** * <code>true</code> iff the transaction is read-only. */ public final boolean readOnly; @@ -2048,7 +2079,15 @@ */ final protected ReentrantLock lock = new ReentrantLock(); - protected TxState(final long tx) { + /** + * + * @param tx + * The assigned transaction identifier. + * @param readCommitTime + * The commit time associated with the commit point against + * which this transaction will read. + */ + protected TxState(final long tx, final long readCommitTime) { if (tx == ITx.UNISOLATED) throw new IllegalArgumentException(); @@ -2058,6 +2097,8 @@ this.tx = tx; + this.readCommitTime = readCommitTime; + this.readOnly = TimestampUtility.isReadOnly(tx); // pre-compute the hash code for the transaction. Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java 2011-09-14 14:27:58 UTC (rev 5185) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java 2011-09-14 15:46:09 UTC (rev 5186) @@ -91,38 +91,38 @@ */ public interface Options { - /** - * How long you want to hold onto the database history (in milliseconds) - * or {@link Long#MAX_VALUE} for an (effectively) immortal database. The - * {@link ITransactionService} tracks the timestamp corresponding to the - * earliest running transaction (if any). 
When such a transaction - * exists, the actual release time is: - * - * <pre> - * releaseTime = min(lastCommitTime - 1, min(earliestRunningTx, now - minimumReleaseAge)) - * </pre> - * - * This ensures that history in use by running transactions is not - * released even when the minimumReleaseAge is ZERO (0). - * <p> - * When no transactions exist the actual release time is: - * - * <pre> - * releaseTime = min(commitTime - 1, now - minimumReleaseAge) - * </pre> - * - * This ensures that the the release time advances when no transactions - * are in use, but that the minimum release age is still respected. - * - * @see #DEFAULT_MIN_RELEASE_AGE - * @see #MIN_RELEASE_AGE_1H - * @see #MIN_RELEASE_AGE_1D - * @see #MIN_RELEASE_AGE_1W - * @see #MIN_RELEASE_AGE_NEVER - * - * @see AbstractTransactionService#updateReleaseTime(long) - * @see AbstractTransactionService#notifyCommit(long) - */ + /** + * How long you want to hold onto the database history (in milliseconds) + * or {@link Long#MAX_VALUE} for an (effectively) immortal database. The + * {@link ITransactionService} tracks the timestamp corresponding to the + * earliest running transaction (if any). When such a transaction + * exists, the actual release time is: + * + * <pre> + * releaseTime = min(lastCommitTime - 1, min(earliestRunningTx, now - minimumReleaseAge)) + * </pre> + * + * This ensures that history in use by running transactions is not + * released even when the minimumReleaseAge is ZERO (0). + * <p> + * When no transactions exist the actual release time is: + * + * <pre> + * releaseTime = min(commitTime - 1, now - minimumReleaseAge) + * </pre> + * + * This ensures that the the release time advances when no transactions + * are in use, but that the minimum release age is still respected. 
+ * + * @see #DEFAULT_MIN_RELEASE_AGE + * @see #MIN_RELEASE_AGE_1H + * @see #MIN_RELEASE_AGE_1D + * @see #MIN_RELEASE_AGE_1W + * @see #MIN_RELEASE_AGE_NEVER + * + * @see AbstractTransactionService#updateReleaseTime(long) + * @see AbstractTransactionService#notifyCommit(long) + */ String MIN_RELEASE_AGE = AbstractTransactionService.class.getName() + ".minReleaseAge"; @@ -677,10 +677,13 @@ try { - final long tx = assignTransactionIdentifier(timestamp); + final AtomicLong readCommitTime = new AtomicLong(); - activateTx(new TxState(tx)); + final long tx = assignTransactionIdentifier(timestamp, + readCommitTime); + activateTx(new TxState(tx, readCommitTime.get())); + return tx; } catch(TimeoutException ex) { @@ -774,9 +777,9 @@ * Return the minimum over the absolute values of the active transactions. */ public long getEarliestTxStartTime() { - - return earliestTxStartTime; - + + return earliestTxStartTime; + } private volatile long earliestTxStartTime = 0L; @@ -997,15 +1000,15 @@ synchronized (startTimeIndex) { - // Note: ZERO (0) is the first tuple in the B+Tree. - // Note: MINUS ONE (-1) means that the B+Tree is empty. - final long indexOf = startTimeIndex.findIndexOf(timestamp); - - isEarliestTx = indexOf == 0; + // Note: ZERO (0) is the first tuple in the B+Tree. + // Note: MINUS ONE (-1) means that the B+Tree is empty. + final long indexOf = startTimeIndex.findIndexOf(timestamp); + + isEarliestTx = indexOf == 0; - // remove start time from the index. - if (indexOf != -1) - startTimeIndex.remove(timestamp); + // remove start time from the index. + if (indexOf != -1) + startTimeIndex.remove(timestamp); if (!isEarliestTx) { @@ -1109,65 +1112,65 @@ try { - updateReleaseTimeForBareCommit(commitTime); - + updateReleaseTimeForBareCommit(commitTime); + } finally { lock.unlock(); } - } + } - /** - * If there are NO active transactions and the current releaseTime is LT - * (commitTime-1) then compute and set the new releaseTime. 
- * <p> - * Note: This method was historically part of {@link #notifyCommit(long)}. - * It was moved into its own method so it can be overridden for some unit - * tests. - * - * @throws IllegalMonitorStateException - * unless the caller is holding the lock. - */ - protected void updateReleaseTimeForBareCommit(final long commitTime) { + /** + * If there are NO active transactions and the current releaseTime is LT + * (commitTime-1) then compute and set the new releaseTime. + * <p> + * Note: This method was historically part of {@link #notifyCommit(long)}. + * It was moved into its own method so it can be overridden for some unit + * tests. + * + * @throws IllegalMonitorStateException + * unless the caller is holding the lock. + */ + protected void updateReleaseTimeForBareCommit(final long commitTime) { -// if(!lock.isHeldByCurrentThread()) -// throw new IllegalMonitorStateException(); +// if(!lock.isHeldByCurrentThread()) +// throw new IllegalMonitorStateException(); - lock.lock(); - try { - synchronized (startTimeIndex) { + lock.lock(); + try { + synchronized (startTimeIndex) { - if (this.releaseTime < (commitTime - 1) - && startTimeIndex.getEntryCount() == 0) { + if (this.releaseTime < (commitTime - 1) + && startTimeIndex.getEntryCount() == 0) { - final long lastCommitTime = commitTime; + final long lastCommitTime = commitTime; - final long now = _nextTimestamp(); + final long now = _nextTimestamp(); - final long releaseTime = Math.min(lastCommitTime - 1, now - - minReleaseAge); + final long releaseTime = Math.min(lastCommitTime - 1, now + - minReleaseAge); - if (this.releaseTime < releaseTime) { + if (this.releaseTime < releaseTime) { - if (log.isInfoEnabled()) - log.info("Advancing releaseTime (no active tx)" - + ": lastCommitTime=" + lastCommitTime - + ", minReleaseAge=" + minReleaseAge + ", now=" - + now + ", releaseTime(" + this.releaseTime - + "->" + releaseTime + ")"); + if (log.isInfoEnabled()) + log.info("Advancing releaseTime (no active tx)" + + ": 
lastCommitTime=" + lastCommitTime + + ", minReleaseAge=" + minReleaseAge + ", now=" + + now + ", releaseTime(" + this.releaseTime + + "->" + releaseTime + ")"); - setReleaseTime(releaseTime); + setReleaseTime(releaseTime); - } + } - } + } - } - } finally { - lock.unlock(); - } + } + } finally { + lock.unlock(); + } } @@ -1212,7 +1215,10 @@ * * @param timestamp * The timestamp. - * + * @param readCommitTime + * The commit point against which the transaction will read. This + * is set as a side-effect on the caller's argument. + * * @return The assigned transaction identifier. * * @throws InterruptedException @@ -1222,9 +1228,12 @@ * if a timeout occurs while awaiting a start time which would * satisfy the request. */ - protected long assignTransactionIdentifier(final long timestamp) + final protected long assignTransactionIdentifier(final long timestamp, + final AtomicLong readCommitTime) throws InterruptedException, TimeoutException { + final long lastCommitTime = getLastCommitTime(); + if (timestamp == ITx.UNISOLATED) { /* @@ -1240,23 +1249,24 @@ * the moment when we assigned this transaction identifier. */ + // The transaction will read from the most recent commit point. + readCommitTime.set(lastCommitTime); + return -nextTimestamp(); } - final long lastCommitTime = getLastCommitTime(); - -// if (timestamp > lastTimestamp) { +// if (timestamp > lastTimestamp) { // // /* // * You can't request a historical read for a timestamp which has not // * yet been issued by this service! // */ // -// throw new IllegalStateException( -// "Timestamp is in the future: timestamp=" + timestamp -// + ", lastCommitTime=" + lastCommitTime -// + ", lastTimestamp=" + lastTimestamp); +// throw new IllegalStateException( +// "Timestamp is in the future: timestamp=" + timestamp +// + ", lastCommitTime=" + lastCommitTime +// + ", lastTimestamp=" + lastTimestamp); // // } else if (timestamp == lastCommitTime) { @@ -1268,6 +1278,9 @@ * READ_COMMITTED. 
*/ + // The transaction will read from the most recent commit point. + readCommitTime.set(lastCommitTime); + return nextTimestamp(); } @@ -1285,6 +1298,9 @@ * timestamp. */ + // The transaction will read from the most recent commit point. + readCommitTime.set(lastCommitTime); + return nextTimestamp(); } @@ -1307,7 +1323,7 @@ } - return getStartTime(timestamp); + return getStartTime(timestamp, readCommitTime); } @@ -1321,11 +1337,15 @@ * * @param timestamp * The timestamp (identifies the desired commit point). + * @param readCommitTime + * The commit point against which the transaction will read. This + * is set as a side-effect on the caller's argument. * * @return A distinct timestamp not in use by any transaction that will read * from the same commit point. */ - protected long getStartTime(final long timestamp) + final protected long getStartTime(final long timestamp, + final AtomicLong readCommitTime) throws InterruptedException, TimeoutException { /* @@ -1334,6 +1354,9 @@ */ final long commitTime = findCommitTime(timestamp); + // The transaction will read from this commit point (-1 iff no commits yet). + readCommitTime.set(commitTime); + if (commitTime == -1L) { /* @@ -1853,6 +1876,14 @@ public final long tx; /** + * The commit time associated with the commit point against which this + * transaction will read. This will be <code>-1</code> IFF there are no + * commit points yet. Otherwise it is a real commit time associated with + * some existing commit point. + */ + public final long readCommitTime; + + /** * <code>true</code> iff the transaction is read-only. */ public final boolean readOnly; @@ -2048,7 +2079,15 @@ */ final protected ReentrantLock lock = new ReentrantLock(); - protected TxState(final long tx) { + /** + * + * @param tx + * The assigned transaction identifier. + * @param readCommitTime + * The commit time associated with the commit point against + * which this transaction will read. 
+ */ + protected TxState(final long tx, final long readCommitTime) { if (tx == ITx.UNISOLATED) throw new IllegalArgumentException(); @@ -2058,6 +2097,8 @@ this.tx = tx; + this.readCommitTime = readCommitTime; + this.readOnly = TimestampUtility.isReadOnly(tx); // pre-compute the hash code for the transaction. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-09-20 16:12:37
|
Revision: 5221 http://bigdata.svn.sourceforge.net/bigdata/?rev=5221&view=rev Author: thompsonbry Date: 2011-09-20 16:12:26 +0000 (Tue, 20 Sep 2011) Log Message: ----------- Javadoc update on Memoizer. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/util/concurrent/Memoizer.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/util/concurrent/Memoizer.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/util/concurrent/Memoizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/util/concurrent/Memoizer.java 2011-09-20 16:04:12 UTC (rev 5220) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/util/concurrent/Memoizer.java 2011-09-20 16:12:26 UTC (rev 5221) @@ -84,7 +84,14 @@ if (f == null) { willRun = true; // Note: MUST set before running! f = ft; - ft.run(); // call to c.compute() happens here; MAY throw out RuntimeException but WILL set exception on FutureTask. + /* + * Note: MAY throw out RuntimeException but WILL set + * exception on FutureTask. Thus the thread which invokes + * ft.run() will have any uncaught exception tossed out of + * ft.run() while other threads will see that exception + * wrapped as an ExecutionException when they call f.get(). + */ + ft.run(); // call to c.compute() happens here. } } try { @@ -110,17 +117,33 @@ // InterruptedException.class)) { /* * Since the task was executed by another thread (ft.run()), - * remove the interrupted task and retry. + * remove the task and retry. * * Note: Basically, what has happened is that the thread * which got to cache.putIfAbsent() first ran the Computable - * and was interrupted while doing so, so that thread needs - * to propagate the InterruptedException back to its caller. + * and something was thrown out of ft.run(), so the thread + * which ran the task needs to propagate the + * InterruptedException back to its caller. 
+ * + * Typically this situation arises when the thread actually + * running the task in ft.run() was interrupted, resulting + * in an wrapped InterruptedException or a wrapped + * ClosedByInterruptException. + * * However, other threads which concurrently request the * same computation MUST NOT see the InterruptedException * since they were not actually interrupted. Therefore, we * yank out the FutureTask and retry for any thread which * did not run the task itself. + * + * If there is a real underlying error, this forces each + * thread who is requesting computation to attempt the + * computation and report back the error in their own + * thread. If the exception is a transient, with the most + * common example being an interrupt, then the operation + * will succeed for the next thread which attempts ft.run() + * and all other threads waiting on f.get() will observe the + * successfully computed Future. */ cache.remove(arg, f); // Retry. Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/util/concurrent/Memoizer.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/util/concurrent/Memoizer.java 2011-09-20 16:04:12 UTC (rev 5220) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/util/concurrent/Memoizer.java 2011-09-20 16:12:26 UTC (rev 5221) @@ -36,8 +36,6 @@ import java.util.concurrent.Future; import java.util.concurrent.FutureTask; -import com.bigdata.util.InnerCause; - /** * Pattern using a {@link FutureTask} to force synchronization only on tasks * waiting for the same computation. This is based on Java Concurrency in @@ -84,6 +82,13 @@ if (f == null) { willRun = true; // Note: MUST set before running! f = ft; + /* + * Note: MAY throw out RuntimeException but WILL set + * exception on FutureTask. 
Thus the thread which invokes + * ft.run() will have any uncaught exception tossed out of + * ft.run() while other threads will see that exception + * wrapped as an ExecutionException when they call f.get(). + */ ft.run(); // call to c.compute() happens here. } } @@ -105,22 +110,38 @@ e2.initCause(e); throw e2; } catch (ExecutionException e) { - if (!willRun - && InnerCause.isInnerCause(e, - InterruptedException.class)) { + if (!willRun) { +// && InnerCause.isInnerCause(e, +// InterruptedException.class)) { /* * Since the task was executed by another thread (ft.run()), - * remove the interrupted task and retry. + * remove the task and retry. * * Note: Basically, what has happened is that the thread * which got to cache.putIfAbsent() first ran the Computable - * and was interrupted while doing so, so that thread needs - * to propagate the InterruptedException back to its caller. + * and something was thrown out of ft.run(), so the thread + * which ran the task needs to propagate the + * InterruptedException back to its caller. + * + * Typically this situation arises when the thread actually + * running the task in ft.run() was interrupted, resulting + * in an wrapped InterruptedException or a wrapped + * ClosedByInterruptException. + * * However, other threads which concurrently request the * same computation MUST NOT see the InterruptedException * since they were not actually interrupted. Therefore, we * yank out the FutureTask and retry for any thread which * did not run the task itself. + * + * If there is a real underlying error, this forces each + * thread who is requesting computation to attempt the + * computation and report back the error in their own + * thread. If the exception is a transient, with the most + * common example being an interrupt, then the operation + * will succeed for the next thread which attempts ft.run() + * and all other threads waiting on f.get() will observe the + * successfully computed Future. */ cache.remove(arg, f); // Retry. 
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-09-20 20:18:57
|
Revision: 5222 http://bigdata.svn.sourceforge.net/bigdata/?rev=5222&view=rev Author: thompsonbry Date: 2011-09-20 20:18:48 +0000 (Tue, 20 Sep 2011) Log Message: ----------- Added missing license file for jetty. Corrected license file for servlet-api. This dependency is from the Apache Tomcat project. @see https://sourceforge.net/apps/trac/bigdata/ticket/374 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/fastutil-license.txt branches/TERMS_REFACTOR_BRANCH/bigdata/LEGAL/fastutil-license.txt Added Paths: ----------- branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/jetty-license.txt branches/TERMS_REFACTOR_BRANCH/bigdata/LEGAL/jetty-license.txt Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/fastutil-license.txt =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/fastutil-license.txt 2011-09-20 16:12:26 UTC (rev 5221) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/fastutil-license.txt 2011-09-20 20:18:48 UTC (rev 5222) @@ -1,504 +1,201 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] + 1. Definitions. - Preamble + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. - The licenses for most software are designed to take away your -freedom to share and change it. 
By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations below. + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. 
These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. - - Finally, software patents pose a constant threat to the existence of -any free program. 
We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. 
- For example, on rare occasions, there may be a special need to -encourage the widest possible use of a certain library, so that it becomes -a de-facto standard. To achieve this, non-free programs must be -allowed to use the library. A more frequent case is that a free -library does the same job as widely used non-free libraries. In this -case, there is little to gain by limiting the free library to free -software only, so we use the Lesser General Public License. + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. - In other cases, permission to use a particular library in non-free -programs enables a greater number of people to use a large body of -free software. For example, permission to use the GNU C Library in -non-free programs enables many more people to use the whole GNU -operating system, as well as its variant, the GNU/Linux operating -system. + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: - Although the Lesser General Public License is Less protective of the -users' freedom, it does ensure that the user of a program that is -linked with the Library has the freedom and the wherewithal to run -that program using a modified version of the Library. + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and - The precise terms and conditions for copying, distribution and -modification follow. Pay close attention to the difference between a -"work based on the library" and a "work that uses the library". The -former contains code derived from the library, whereas the latter must -be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - 0. This License Agreement applies to any software library or other -program which contains a notice placed by the copyright holder or -other authorized party saying it may be distributed under the terms of -this Lesser General Public License (also called "this License"). -Each licensee is addressed as "you". + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and - A "library" means a collection of software functions and/or data -prepared so as to be conveniently linked with application programs -(which use some of those functions and data) to form executables. 
+ (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. - The "Library", below, refers to any such software library or work -which has been distributed under these terms. A "work based on the -Library" means either the Library or any derivative work under -copyright law: that is to say, a work containing the Library or a -portion of it, either verbatim or with modifications and/or translated -straightforwardly into another language. (Hereinafter, translation is -included without limitation in the term "modification".) + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. - "Source code" for a work means the preferred form of the work for -making modifications to it. 
For a library, complete source code means -all the source code for all modules it contains, plus any associated -interface definition files, plus the scripts used to control compilation -and installation of the library. + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. - Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running a program using the Library is not restricted, and output from -such a program is covered only if its contents constitute a work based -on the Library (independent of the use of the Library in a tool for -writing it). Whether that is true depends on what the Library does -and what the program that uses the Library does. - - 1. You may copy and distribute verbatim copies of the Library's -complete source code as you receive it, in any medium, provided that -you conspicuously and appropriately publish on each copy an -appropriate copyright notice and disclaimer of warranty; keep intact -all the notices that refer to this License and to the absence of any -warranty; and distribute a copy of this License along with the -Library. + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. - You may charge a fee for the physical act of transferring a copy, -and you may at your option offer warranty protection in exchange for a -fee. - - 2. 
You may modify your copy or copies of the Library or any portion -of it, thus forming a work based on the Library, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. - a) The modified work must itself be a software library. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. + 9. Accepting Warranty or Additional Liability. 
While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. + END OF TERMS AND CONDITIONS - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. + APPENDIX: How to apply the Apache License to your work. - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Library, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Library, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote -it. + Copyright [yyyy] [name of copyright owner] -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Library. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -In addition, mere aggregation of another work not based on the Library -with the Library (or with a work based on the Library) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. + http://www.apache.org/licenses/LICENSE-2.0 - 3. You may opt to apply the terms of the ordinary GNU General Public -License instead of this License to a given copy of the Library. To do -this, you must alter all the notices that refer to this License, so -that they refer to the ordinary GNU General Public License, version 2, -instead of to this License. 
(If a newer version than version 2 of the -ordinary GNU General Public License has appeared, then you can specify -that version instead if you wish.) Do not make any other change in -these notices. - - Once this change is made in a given copy, it is irreversible for -that copy, so the ordinary GNU General Public License applies to all -subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of -the Library into a program that is not a library. - - 4. You may copy and distribute the Library (or a portion or -derivative of it, under Section 2) in object code or executable form -under the terms of Sections 1 and 2 above provided that you accompany -it with the complete corresponding machine-readable source code, which -must be distributed under the terms of Sections 1 and 2 above on a -medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the -source code from the same place satisfies the requirement to -distribute the source code, even though third parties are not -compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the -Library, but is designed to work with the Library by being compiled or -linked with it, is called a "work that uses the Library". Such a -work, in isolation, is not a derivative work of the Library, and -therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library -creates an executable that is a derivative of the Library (because it -contains portions of the Library), rather than a "work that uses the -library". The executable is therefore covered by this License. -Section 6 states terms for distribution of such executables. 
- - When a "work that uses the Library" uses material from a header file -that is part of the Library, the object code for the work may be a -derivative work of the Library even though the source code is not. -Whether this is true is especially significant if the work can be -linked without the Library, or if the work is itself a library. The -threshold for this to be true is not precisely defined by law. - - If such an object file uses only numerical parameters, data -structure layouts and accessors, and small macros and small inline -functions (ten lines or less in length), then the use of the object -file is unrestricted, regardless of whether it is legally a derivative -work. (Executables containing this object code plus portions of the -Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may -distribute the object code for the work under the terms of Section 6. -Any executables containing that work also fall under Section 6, -whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or -link a "work that uses the Library" with the Library to produce a -work containing portions of the Library, and distribute that work -under terms of your choice, provided that the terms permit -modification of the work for the customer's own use and reverse -engineering for debugging such modifications. - - You must give prominent notice with each copy of the work that the -Library is used in it and that the Library and its use are covered by -this License. You must supply a copy of this License. If the work -during execution displays copyright notices, you must include the -copyright notice for the Library among them, as well as a reference -directing the user to the copy of this License. 
Also, you must do one -of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. (It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at - least three years, to give the same user the materials - specified in Subsection 6a, above, for a charge no more - than the cost of performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. - - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the -Library" must include any data and utility programs needed for -reproducing the executable from it. 
However, as a special exception, -the materials to be distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies -the executable. - - It may happen that this requirement contradicts the license -restrictions of other proprietary libraries that do not normally -accompany the operating system. Such a contradiction means you cannot -use both them and the Library together in an executable that you -distribute. - - 7. You may place library facilities that are a work based on the -Library side-by-side in a single library together with other library -facilities not covered by this License, and distribute such a combined -library, provided that the separate distribution of the work based on -the Library and of the other library facilities is otherwise -permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. - - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. You may not copy, modify, sublicense, link with, or distribute -the Library except as expressly provided under this License. Any -attempt otherwise to copy, modify, sublicense, link with, or -distribute the Library is void, and will automatically terminate your -rights under this License. However, parties who have received copies, -or rights, from you under this License will not have their licenses -terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not -signed it. 
However, nothing else grants you permission to modify or -distribute the Library or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Library (or any work based on the -Library), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - - 10. Each time you redistribute the Library (or any work based on the -Library), the recipient automatically receives a license from the -original licensor to copy, distribute, link with or modify the Library -subject to these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties with -this License. - - 11. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Library at all. For example, if a patent -license would not permit royalty-free redistribution of the Library by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply, -and the section as a whole is intended to apply in other circumstances. 
- -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 12. If the distribution and/or use of the Library is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Library under this License may add -an explicit geographical distribution limitation excluding those countries, -so that distribution is permitted only in or among countries not thus -excluded. In such case, this License incorporates the limitation as if -written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new -versions of the Lesser General Public License from time to time. -Such new versions will be similar in spirit to the present version, -but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library -specifies a version number of this License which applies to it and -"any later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Library does not specify a -license version number, you may choose any version ever published by -the Free Software Foundation. - - 14. 
If you wish to incorporate parts of the Library into other free -programs whose distribution conditions are incompatible with these, -write to the author to ask for permission. For software which is -copyrighted by the Free Software Foundation, write to the Free -Software Foundation; we sometimes make exceptions for this. Our -decision will be guided by the two goals of preserving the free status -of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE -LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU -FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR -CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE -LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF -SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGES. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Libraries - - If you develop a new library, and you want it to be of the greatest -possible use to the public, we recommend making it free software that -everyone can redistribute and change. You can do so by permitting -redistribution under these terms (or, alternatively, under the terms of the -ordinary General Public License). - - To apply these terms, attach the following notices to the library. It is -safest to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - <one line to give the library's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the library, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - library `Frob' (a library for tweaking knobs) written by James Random Hacker. 
- - <signature of Ty Coon>, 1 April 1990 - Ty Coon, President of Vice - -That's all there is to it! - - + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. Added: branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/jetty-license.txt =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/jetty-license.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/jetty-license.txt 2011-09-20 20:18:48 UTC (rev 5222) @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. Property changes on: branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/jetty-license.txt ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/LEGAL/fastutil-license.txt =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/LEGAL/fastutil-license.txt 2011-09-20 16:12:26 UTC (rev 5221) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/LEGAL/fastutil-license.txt 2011-09-20 20:18:48 UTC (rev 5222) @@ -1,504 +1,201 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] + 1. Definitions. - Preamble + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
- The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations below. + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. 
These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. - - Finally, software patents pose a constant threat to the existence of -any free program. 
We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. 
- For example, on rare occasions, there may be a special need to -encourage the wi... [truncated message content] |
From: <tho...@us...> - 2011-09-20 20:21:55
|
Revision: 5223 http://bigdata.svn.sourceforge.net/bigdata/?rev=5223&view=rev Author: thompsonbry Date: 2011-09-20 20:21:49 +0000 (Tue, 20 Sep 2011) Log Message: ----------- Added logging statements (which are commented out) to log the open/close of transactions on the journal. This information might eventually be attached to a specific logger as it can be useful for tracking down cases when people are pinning the RWStore history with a very long lived read-only transaction. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/Journal.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/Journal.java 2011-09-20 20:18:48 UTC (rev 5222) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/Journal.java 2011-09-20 20:21:49 UTC (rev 5223) @@ -319,6 +319,7 @@ protected void activateTx(final TxState state) { final IBufferStrategy bufferStrategy = Journal.this.getBufferStrategy(); if(bufferStrategy instanceof RWStrategy) { +// Logger.getLogger("TransactionTrace").info("OPEN: txId="+state.tx+", readsOnCommitTime="+state.readCommitTime); ((RWStrategy)bufferStrategy).getRWStore().activateTx(); } super.activateTx(state); @@ -328,6 +329,7 @@ super.deactivateTx(state); final IBufferStrategy bufferStrategy = Journal.this.getBufferStrategy(); if(bufferStrategy instanceof RWStrategy) { +// Logger.getLogger("TransactionTrace").info("DONE: txId="+state.tx+", readsOnCommitTime="+state.readCommitTime); ((RWStrategy)bufferStrategy).getRWStore().deactivateTx(); } } Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- 
branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java 2011-09-20 20:18:48 UTC (rev 5222) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java 2011-09-20 20:21:49 UTC (rev 5223) @@ -319,6 +319,7 @@ protected void activateTx(final TxState state) { final IBufferStrategy bufferStrategy = Journal.this.getBufferStrategy(); if(bufferStrategy instanceof RWStrategy) { +// Logger.getLogger("TransactionTrace").info("OPEN: txId="+state.tx+", readsOnCommitTime="+state.readCommitTime); ((RWStrategy)bufferStrategy).getRWStore().activateTx(); } super.activateTx(state); @@ -328,6 +329,7 @@ super.deactivateTx(state); final IBufferStrategy bufferStrategy = Journal.this.getBufferStrategy(); if(bufferStrategy instanceof RWStrategy) { +// Logger.getLogger("TransactionTrace").info("DONE: txId="+state.tx+", readsOnCommitTime="+state.readCommitTime); ((RWStrategy)bufferStrategy).getRWStore().deactivateTx(); } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-09-21 19:27:24
|
Revision: 5230 http://bigdata.svn.sourceforge.net/bigdata/?rev=5230&view=rev Author: thompsonbry Date: 2011-09-21 19:27:13 +0000 (Wed, 21 Sep 2011) Log Message: ----------- A variety of license / copyright notice cleanup related changes. done. Added com.bigdata.Depends. This file has the name of each dependency component, the project URL, and the license URL. This can be used to self-report on the dependencies. Not all dependencies are required for all deployments, but this class does not presently allow selective reporting based on the kind of deployment (standalone versus cluster). done. Modified the Banner class to include a notice for each dependency, including the name of the component, the project URL, and a link to the license file. done. Our own license file is not making it into the top-level of bigdata.jar or bigdata.war. The JAR now includes the NOTICE files that we have in our source tree for the code that is bundled into the JAR. The JAR does not bundle those dependencies so it does not need to include the LEGAL/* files for those dependencies. However, we DO need the LICENSE files for Apache and Sesame since we have imported some code from those projects. The WAR actually redistributes dependencies. It needs to include all the files in the LEGAL directories in our source tree (these are the dependency license files) plus all the NOTICE files in those dependencies. Wrote an ant task to concatenate all NOTICE(.txt) files into a single combined NOTICE file. The JAR now includes the combined NOTICE for the bigdata source tree as bundled into the JAR. The WAR now includes the combined NOTICE for all dependencies (even those which are not actually bundled into the WAR). done. The overview.html file is missing for the javadoc. done. Verify WAR deployment. done. Bump up the branching factor for the SPO relation for the default kb when deploying under the webapp. 
Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/NOTICE branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java branches/BIGDATA_RELEASE_1_0_0/bigdata-gom/LICENSE.txt branches/BIGDATA_RELEASE_1_0_0/bigdata-jini/LEGAL/NOTICE branches/BIGDATA_RELEASE_1_0_0/bigdata-jini/LEGAL/README.txt branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/LEGAL/NOTICE branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/samples/com/bigdata/samples/quads.properties branches/BIGDATA_RELEASE_1_0_0/bigdata-war/RWStore.properties branches/BIGDATA_RELEASE_1_0_0/build.properties branches/BIGDATA_RELEASE_1_0_0/build.xml branches/BIGDATA_RELEASE_1_0_0/dsi-utils/LEGAL/NOTICE branches/BIGDATA_RELEASE_1_0_0/lgpl-utils/LEGAL/NOTICE branches/BIGDATA_RELEASE_1_0_0/overview.html branches/TERMS_REFACTOR_BRANCH/bigdata/LEGAL/NOTICE branches/TERMS_REFACTOR_BRANCH/bigdata/lib/icu/ICU-README.txt branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/Banner.java branches/TERMS_REFACTOR_BRANCH/bigdata-gom/LICENSE.txt branches/TERMS_REFACTOR_BRANCH/bigdata-jini/LEGAL/NOTICE branches/TERMS_REFACTOR_BRANCH/bigdata-jini/LEGAL/README.txt branches/TERMS_REFACTOR_BRANCH/bigdata-sails/LEGAL/NOTICE branches/TERMS_REFACTOR_BRANCH/bigdata-war/RWStore.properties branches/TERMS_REFACTOR_BRANCH/build.xml branches/TERMS_REFACTOR_BRANCH/dsi-utils/LEGAL/NOTICE branches/TERMS_REFACTOR_BRANCH/junit-ext/NOTICE branches/TERMS_REFACTOR_BRANCH/lgpl-utils/LEGAL/NOTICE branches/TERMS_REFACTOR_BRANCH/overview.html Added Paths: ----------- branches/BIGDATA_RELEASE_1_0_0/NOTICE branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Depends.java branches/TERMS_REFACTOR_BRANCH/NOTICE branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/Depends.java branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/LEGAL/NOTICE branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/LEGAL/lubm-license.txt branches/TERMS_REFACTOR_BRANCH/bigdata-rdf/LEGAL/NOTICE branches/TERMS_REFACTOR_BRANCH/bigdata-rdf/lib/NOTICE 
branches/TERMS_REFACTOR_BRANCH/bigdata-sails/LEGAL/README.txt branches/TERMS_REFACTOR_BRANCH/lgpl-utils/LEGAL/infinispan-license.txt Removed Paths: ------------- branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/LEGAL/LICENSE.txt branches/TERMS_REFACTOR_BRANCH/bigdata-sails/LEGAL/jetty-license.txt branches/TERMS_REFACTOR_BRANCH/lgpl-utils/src/java/package.html Added: branches/BIGDATA_RELEASE_1_0_0/NOTICE =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/NOTICE (rev 0) +++ branches/BIGDATA_RELEASE_1_0_0/NOTICE 2011-09-21 19:27:13 UTC (rev 5230) @@ -0,0 +1,21 @@ + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/NOTICE =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/NOTICE 2011-09-21 12:29:53 UTC (rev 5229) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/LEGAL/NOTICE 2011-09-21 19:27:13 UTC (rev 5230) @@ -1,2 +1,5 @@ -Portions of this softwere were developed by The Apache Foundation. The relevant files -are under the Apache 2.0 license and may be found in the org.apache package namespace. 
+ +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/)." + +Copyright 2009 The Apache Software Foundation Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java 2011-09-21 12:29:53 UTC (rev 5229) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java 2011-09-21 19:27:13 UTC (rev 5230) @@ -28,14 +28,22 @@ package com.bigdata; +import java.lang.reflect.Field; import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.Collections; import java.util.Date; +import java.util.Formatter; +import java.util.LinkedHashMap; +import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.system.SystemUtil; +import com.bigdata.Depends.Dependency; import com.bigdata.counters.AbstractStatisticsCollector; import com.bigdata.util.InnerCause; @@ -49,6 +57,11 @@ */ public class Banner { + /** + * The logger for <em>this</em> class. + */ + private static final Logger log = Logger.getLogger("com.bigdata.Banner"); + private final static AtomicBoolean didBanner = new AtomicBoolean(false); /** @@ -72,7 +85,7 @@ } - synchronized static public void banner() { + static public void banner() { if(didBanner.compareAndSet(false/*expect*/, true/*update*/)) { @@ -80,55 +93,55 @@ if (!quiet) { - System.out.println(banner); - - } + final StringBuilder sb = new StringBuilder(banner); - final Logger log = Logger.getLogger("com.bigdata"); + // Add in the dependencies. 
+ { + int maxNameLen = 0, maxProjectLen = 0, maxLicenseLen = 0; + for (Dependency dep : com.bigdata.Depends.depends()) { + if (dep.getName().length() > maxNameLen) + maxNameLen = dep.getName().length(); + if (dep.projectURL().length() > maxProjectLen) + maxProjectLen = dep.projectURL().length(); + if (dep.licenseURL().length() > maxLicenseLen) + maxLicenseLen = dep.licenseURL().length(); + } + maxNameLen = Math.min(80, maxNameLen); + maxProjectLen = Math.min(80, maxProjectLen); + maxLicenseLen = Math.min(80, maxLicenseLen); - if (log.getLevel() == null) { + final Formatter f = new Formatter(sb); - /* - * Since there is no default for com.bigdata, default to WARN. - */ - try { + final String fmt1 = "" // + + "%-" + maxNameLen + "s"// +// + " %-" + maxProjectLen + "s" // + + " %-" + maxLicenseLen + "s"// + + "\n"; - log.setLevel(Level.WARN); + f.format(fmt1, "Dependency", "License"); + + for (Dependency dep : com.bigdata.Depends.depends()) { - if (!quiet) - log.warn("Defaulting log level to WARN: " - + log.getName()); + f.format(fmt1, // + dep.getName(),// +// dep.projectURL(),// + dep.licenseURL()// + ); - } catch (Throwable t) { - - /* - * Note: The SLF4J bridge can cause a NoSuchMethodException - * to be thrown out of Logger.setLevel(). We trap this - * exception and log a message @ ERROR. It is critical that - * bigdata logging is properly configured as logging at INFO - * for com.bigdata will cause a tremendous loss of - * performance. - * - * @see https://sourceforge.net/apps/trac/bigdata/ticket/362 - */ - if (InnerCause.isInnerCause(t, NoSuchMethodException.class)) { - - log.error("Unable to raise the default log level to WARN." - + " Logging is NOT properly configured." - + " Severe performance penalty will result."); - - } else { - - // Something else that we are not expecting. 
- throw new RuntimeException(t); - } - } + + System.out.println(sb); + + } - } // if(log.getLevel() == null) - /* + * If logging is not configured for [com.bigdata] then we set a + * default log level @ WARN. This is critical for good performance. + */ + setDefaultLogLevel(quiet); + + /* * Note: I have modified this to test for disabled registration and * to use reflection in order to decouple the JMX dependency for * anzo. @@ -159,8 +172,160 @@ } } + + /** + * If logging is not configured for [com.bigdata] then we set a default log + * level @ WARN. This is critical for good performance. + */ + private static void setDefaultLogLevel(final boolean quiet) { + final Logger defaultLog = Logger.getLogger("com.bigdata"); + + if (defaultLog.getLevel() == null) { + + /* + * Since there is no default for com.bigdata, default to WARN. + */ + try { + + defaultLog.setLevel(Level.WARN); + + if (!quiet) + log.warn("Defaulting log level to WARN: " + + defaultLog.getName()); + + } catch (Throwable t) { + + /* + * Note: The SLF4J bridge can cause a NoSuchMethodException to + * be thrown out of Logger.setLevel(). We trap this exception + * and log a message @ ERROR. It is critical that bigdata + * logging is properly configured as logging at INFO for + * com.bigdata will cause a tremendous loss of performance. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/362 + */ + if (InnerCause.isInnerCause(t, NoSuchMethodException.class)) { + + log.error("Unable to raise the default log level to WARN." + + " Logging is NOT properly configured." + + " Severe performance penalty will result."); + + } else { + + // Something else that we are not expecting. + throw new RuntimeException(t); + + } + + } + + } // if(log.getLevel() == null) + + } + /** + * Use reflection to discover and report on the bigdata build information. A + * <code>com.bigdata.BuildInfo</code> is built when the JAR is created. 
+ * However, it may not be present when running under an IDE from the source + * code and, therefore, there MUST NOT be any compile time references to the + * <code>com.bigdata.BuildInfo</code> class. This method uses reflection to + * avoid a compile time dependency. + * <p> + * Note: This method works fine. However, the problem with exposing the + * information is that people running from an IDE can observe <em>stale</em> + * data from old <code>com.bigdata.BuildInfo</code> class files left from a + * previous build of a JAR. This makes the information good for deployed + * versions of the JAR but potentially misleading when people are running + * under an IDE. + * + * @return Build info metadata iff available. + */ + private synchronized static Map<String,String> getBuildInfo() { + + if (buildInfoRef.get() == null) { + + final Map<String,String> map = new LinkedHashMap<String, String>(); + + try { + + final Class<?> cls = Class.forName("com.bigdata.BuildInfo"); + + for (Field f : cls.getFields()) { + + final String name = f.getName(); + + final int mod = f.getModifiers(); + + if (!Modifier.isStatic(mod)) + continue; + + if (!Modifier.isPublic(mod)) + continue; + + if (!Modifier.isFinal(mod)) + continue; + + try { + + final Object obj = f.get(null/* staticField */); + + if (obj != null) { + + map.put(name, "" + obj); + + } + + } catch (IllegalArgumentException e) { + + log.warn("Field: " + name + " : " + e); + + } catch (IllegalAccessException e) { + + log.warn("Field: " + name + " : " + e); + + } + + } + + } catch (ClassNotFoundException e) { + + log.warn("Not found: " + "com.bigdata.BuildInfo"); + + } catch (Throwable t) { + + log.error(t, t); + + } + + // set at most once. 
+ buildInfoRef.compareAndSet(null/* expect */, + Collections.unmodifiableMap(map)/* update */); + + } + + return buildInfoRef.get(); + + } + + private final static AtomicReference<Map<String, String>> buildInfoRef = new AtomicReference<Map<String, String>>(); + + private final static String getBuildString() { + + if (getBuildInfo().isEmpty()) + return ""; + + final StringBuilder s = new StringBuilder(); + + s.append("\nbuildVersion=" + getBuildInfo().get("buildVersion")); + +// s.append("\nsvnRevision =" + getBuildInfo().get("svnRevision")); + + return s.toString(); + + } + + /** * Outputs the banner and exits. * * @param args @@ -168,7 +333,7 @@ */ public static void main(final String[] args) { - System.out.println(banner); + banner(); } @@ -188,7 +353,8 @@ + " " + SystemUtil.architecture() + // "\n"+SystemUtil.cpuInfo() + " #CPU="+SystemUtil.numProcessors() +// "\n"+System.getProperty("java.vendor")+" "+System.getProperty("java.version")+ - "\n" + getBuildString()+ // Note: Will add its own newline if non-empty. + "\n\n" ; } Added: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Depends.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Depends.java (rev 0) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Depends.java 2011-09-21 19:27:13 UTC (rev 5230) @@ -0,0 +1,265 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 20, 2011 + */ + +package com.bigdata; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; + +/** + * Class provides static information about project dependencies. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class Depends { + + public interface Dependency { + + /** The component name. */ + String getName(); + + /** The project URL. */ + String projectURL(); + + /** The license URL. */ + String licenseURL(); + + }; + + private static class OrderByName implements Comparator<Dependency> { + + public int compare(Dependency o1, Dependency o2) { + return o1.getName().compareTo(o2.getName()); + } + + } + + private static class OrderByLicense implements Comparator<Dependency> { + + public int compare(Dependency o1, Dependency o2) { + return o1.licenseURL().compareTo(o2.licenseURL()); + } + + } + + /** + * Metadata for dependencies. 
+ */ + private static class Dep implements Dependency { + + private final String component; + + private final String projectURL; + + private final String licenseURL; + + public Dep(final String component, final String projectURL, + final String licenseURL) { + + if (component == null || projectURL == null || licenseURL == null) + throw new IllegalArgumentException(); + + this.component = component; + this.projectURL = projectURL; + this.licenseURL = licenseURL; + + } + + final public String getName() { + return component; + } + + final public String projectURL() { + return projectURL; + } + + final public String licenseURL() { + return licenseURL; + } + + public String toString() { + + return "{name=" + component + ", project=" + projectURL + + ", license=" + licenseURL + "}"; + + } + + } + + /** + * An Apache project. + */ + private static class ApacheDep extends Dep { + + public ApacheDep(String component, String projectURL) { + super(component, projectURL, + "http://www.apache.org/licenses/LICENSE-2.0.html"); + } + + } + + /** + * A project which we are redistributing under LGPL v2.1. 
+ */ + private static class LGPL21Dep extends Dep { + + public LGPL21Dep(String component, String projectURL) { + super(component, projectURL, + "http://www.gnu.org/licenses/lgpl-2.1.html"); + } + + } + + private final static Dep jini = new ApacheDep("jini", + "http://river.apache.org/"); + + private final static Dep zookeeper = new ApacheDep("zookeeper", + "http://hadoop.apache.org/zookeeper/"); + + private final static Dep log4j = new ApacheDep("log4j", + "http://logging.apache.org/log4j/1.2/"); + + private final static Dep lucene = new ApacheDep("lucene", + "http://lucene.apache.org/java/docs/index.html"); + + private final static Dep colt = new Dep("colt", + "http://acs.lbl.gov/software/colt/", + "http://acs.lbl.gov/software/colt/license.html"); + + private final static Dep dsiutils = new LGPL21Dep("dsiutils", + "http://dsiutils.dsi.unimi.it/"); + + private final static Dep fastutil = new Dep("fastutil", + "http://fastutil.dsi.unimi.it/", + "http://www.apache.org/licenses/LICENSE-2.0.html"); + + private final static Dep iris = new LGPL21Dep("iris", + "http://www.iris-reasoner.org"); + + private final static Dep jgrapht = new LGPL21Dep("jgrapht", + "http://www.jgrapht.org/"); + + private final static Dep tuprolog = new LGPL21Dep("tuprolog", + "http://www.alice.unibo.it/xwiki/bin/view/Tuprolog/"); + + private final static Dep highScaleLib = new Dep("high-scale-lib", + "https://sourceforge.net/projects/high-scale-lib/", + "http://creativecommons.org/licenses/publicdomain"); + + private final static Dep cweb = new Dep( + "cweb", + "http://www.cognitiveweb.org/", + "http://www.cognitiveweb.org/legal/license/CognitiveWebOpenSourceLicense-1.1.html"); + + private final static Dep flot = new Dep("flot", + "http://code.google.com/p/flot/", + "http://www.opensource.org/licenses/mit-license.php"); + + /** + * Dual licensed under the MIT (MIT-LICENSE.txt) and GPL (GPL-LICENSE.txt) + * licenses. (We use the MIT license). 
+ */ + private final static Dep jquery = new Dep("jquery", + "http://jquery.com/", + "https://github.com/jquery/jquery/blob/master/MIT-LICENSE.txt"); + + private final static Dep slf4j = new Dep("slf4j", "http://www.slf4j.org/", + "http://www.slf4j.org/license.html"); + + private final static Dep sesame = new Dep("sesame", + "http://www.openrdf.org/", "http://www.openrdf.org/download.jsp"); + + private final static Dep icu = new Dep("ICU", + "http://site.icu-project.org/", + "http://source.icu-project.org/repos/icu/icu/trunk/license.html"); + + private final static Dep nxparser = new Dep("nxparser", + "http://sw.deri.org/2006/08/nxparser/", + "http://sw.deri.org/2006/08/nxparser/license.txt"); + + private final static Dep nanohttp = new Dep("nanohttp", + "http://elonen.iki.fi/code/nanohttpd/", + "http://elonen.iki.fi/code/nanohttpd/#license"); + + /** + * Dual licensed under apache 2.0 and Eclipse Public License 1.0. We use the + * Apache 2.0 license. + * + * @see http://www.eclipse.org/jetty/licenses.php + */ + private final static Dep jetty = new Dep("jetty", + "http://www.eclipse.org/jetty/", + "http://www.apache.org/licenses/LICENSE-2.0.html"); + + private final static Dep servletApi = new ApacheDep("servlet-api", + "http://tomcat.apache.org"); + + static private final Dep[] depends; + static { + depends = new Dep[] { // + // standalone + log4j,// + lucene,// + colt,// + dsiutils,// + fastutil,// + highScaleLib,// + cweb,// + slf4j,// + sesame,// + icu,// + nxparser,// + nanohttp,// + jetty,// + servletApi,// + // scale-out + jini,// + zookeeper,// + // javascript + flot,// + jquery,// + // linked, but not used. + iris,// + jgrapht,// + tuprolog,// + }; + Arrays.sort(depends, new OrderByName()); + } + + /** + * Return an unmodifiable list of the dependencies. 
+ */ + static public final List<Dep> depends() { + + return Collections.unmodifiableList(Arrays.asList(depends)); + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Depends.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-gom/LICENSE.txt =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-gom/LICENSE.txt 2011-09-21 12:29:53 UTC (rev 5229) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-gom/LICENSE.txt 2011-09-21 19:27:13 UTC (rev 5230) @@ -1,40 +1,354 @@ -The Notice below must appear in each file of the Source Code of any -copy you distribute of the Licensed Product. Contributors to any -Modifications may add their own copyright notices to identify their -own contributions. +The GNU General Public License (GPL) +Version 2, June 1991 -License: +Copyright (C) 1989, 1991 Free Software Foundation, Inc. -The contents of this file are subject to the CognitiveWeb Open Source -License Version 1.1 (the License). You may not copy or use this file, -in either source code or executable form, except in compliance with -the License. You may obtain a copy of the License from +59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - http://www.CognitiveWeb.org/legal/license/ +Everyone is permitted to copy and distribute verbatim copies -Software distributed under the License is distributed on an AS IS -basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See -the License for the specific language governing rights and limitations -under the License. +of this license document, but changing it is not allowed. -Copyrights: -Portions created by or assigned to CognitiveWeb are Copyright -(c) 2003-2003 CognitiveWeb. All Rights Reserved. 
Contact -information for CognitiveWeb is available at +Preamble - http://www.CognitiveWeb.org -Portions Copyright (c) 2002-2003 Bryan Thompson. +The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. -Acknowledgements: +When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. -Special thanks to the developers of the Jabber Open Source License 1.0 -(JOSL), from which this License was derived. This License contains -terms that differ from JOSL. +To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. -Special thanks to the CognitiveWeb Open Source Contributors for their -suggestions and support of the Cognitive Web. +For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. 
-Modifications: +We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. +Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + +Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and +modification follow. + + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + +0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + +1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + +a) You must cause the modified files to carry prominent notices +stating that you changed the files and the date of any change. + +b) You must cause any work that you distribute or publish, that in +whole or in part contains or is derived from the Program or any +part thereof, to be licensed as a whole at no charge to all third +parties under the terms of this License. + +c) If the modified program normally reads commands interactively +when run, you must cause it, when started running for such +interactive use in the most ordinary way, to print or display an +announcement including an appropriate copyright notice and a +notice that there is no warranty (or else, saying that you provide +a warranty) and that users may redistribute the program under +these conditions, and telling the user how to view a copy of this +License. 
(Exception: if the Program itself is interactive but +does not normally print such an announcement, your work based on +the Program is not required to print an announcement.) + + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + +3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + +a) Accompany it with the complete corresponding machine-readable +source code, which must be distributed under the terms of Sections +1 and 2 above on a medium customarily used for software interchange; or, + +b) Accompany it with a written offer, valid for at least three +years, to give any third party, for a charge no more than your +cost of physically performing source distribution, a complete +machine-readable copy of the corresponding source code, to be +distributed under the terms of Sections 1 and 2 above on a medium +customarily used for software interchange; or, + +c) Accompany it with the information you received as to the offer +to distribute corresponding source code. (This alternative is +allowed only for noncommercial distribution and only if you +received the program in object code or executable form with such +an offer, in accord with Subsection b above.) + + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + +5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + +6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + +7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + +8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + +9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + +10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + +NO WARRANTY + +11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + +12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +END OF TERMS AND CONDITIONS + + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + +One line to give the program's name and a brief idea of what it does. 
+ +Copyright (C) <year> <name of author> + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + +Gnomovision version 69, Copyright (C) year name of author +Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. +This is free software, and you are welcome to redistribute it +under certain conditions; type `show c' for details. + + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + +Yoyodyne, Inc., hereby disclaims all copyright interest in the program +`Gnomovision' (which makes passes at compilers) written by James Hacker. + +signature of Ty Coon, 1 April 1989 + +Ty Coon, President of Vice + + +This General Public License does not permit incorporating your program into +proprietary programs. 
If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. + Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-jini/LEGAL/NOTICE =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-jini/LEGAL/NOTICE 2011-09-21 12:29:53 UTC (rev 5229) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-jini/LEGAL/NOTICE 2011-09-21 19:27:13 UTC (rev 5230) @@ -1,5 +0,0 @@ -This project redistributes Jini 2.1. - -There is a dependency on the bigdata module. - -Other dependencies exist and are described in the bigdata module. Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-jini/LEGAL/README.txt =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-jini/LEGAL/README.txt 2011-09-21 12:29:53 UTC (rev 5229) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-jini/LEGAL/README.txt 2011-09-21 19:27:13 UTC (rev 5230) @@ -1,2 +1,8 @@ The lib directory contains some bundled library dependencies. The licenses for bundled dependencies are found in this directory (LEGAL). + +This module has a dependency on Jini 2.1 and Apache Zookeeper. + +There is also a dependency on the bigdata module. + +Other dependencies exist and are described in the bigdata module. Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/LEGAL/NOTICE =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/LEGAL/NOTICE 2011-09-21 12:29:53 UTC (rev 5229) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/LEGAL/NOTICE 2011-09-21 19:27:13 UTC (rev 5230) @@ -1 +1,6 @@ -We are bundling jetty under the Apache 2.0 license. \ No newline at end of file + +Portions of this software are derived from Sesame. + +Copyright Aduna (http://www.aduna-software.com/) © 2001-2011 All rights reserved. 
+ + Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/samples/com/bigdata/samples/quads.properties =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/samples/com/bigdata/samples/quads.properties 2011-09-21 12:29:53 UTC (rev 5229) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-sails/src/samples/com/bigdata/samples/quads.properties 2011-09-21 19:27:13 UTC (rev 5230) @@ -2,9 +2,18 @@ com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.NoAxioms com.bigdata.rdf.store.AbstractTripleStore.quads=true com.bigdata.rdf.store.AbstractTripleStore.statementIdentifiers=false +com.bigdata.rdf.store.AbstractTripleStore.textIndex=false +com.bigdata.rdf.sail.bufferCapacity=100000 + # turn off automatic inference in the SAIL com.bigdata.rdf.sail.truthMaintenance=false # The name of the backing file. com.bigdata.journal.AbstractJournal.file=bigdata.jnl +com.bigdata.journal.AbstractJournal.bufferMode=DiskRW + +com.bigdata.btree.writeRetentionQueue.capacity=4000 +com.bigdata.btree.BTree.branchingFactor=512 + +com.bigdata.rdf.store.DataLoader.commit=Incremental Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-war/RWStore.properties =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-war/RWStore.properties 2011-09-21 12:29:53 UTC (rev 5229) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-war/RWStore.properties 2011-09-21 19:27:13 UTC (rev 5230) @@ -15,6 +15,9 @@ com.bigdata.btree.writeRetentionQueue.capacity=4000 com.bigdata.btree.BTree.branchingFactor=128 +# Bump up the branching factor for the statement indices on the default kb. +com.bigdata.namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor=512 + # 200M initial extent. 
com.bigdata.journal.AbstractJournal.initialExtent=209715200 com.bigdata.journal.AbstractJournal.maximumExtent=209715200 Modified: branches/BIGDATA_RELEASE_1_0_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/build.properties 2011-09-21 12:29:53 UTC (rev 5229) +++ branches/BIGDATA_RELEASE_1_0_0/build.properties 2011-09-21 19:27:13 UTC (rev 5230) @@ -24,12 +24,16 @@ # debuglevel=lines,vars,source (or any combination thereof). javac.debuglevel=lines,vars,source javac.verbose=off -#javac.target=1.6 -#javac.source=1.6 +javac.source=1.6 +javac.target=1.6 javac.encoding=Cp1252 -# The zookeeper version. +# Versions to use of various dependencies. +icu.version=3.6 zookeeper.version=3.3.3 +sesame.version=2.3.0 +slf4j.version=1.4.3 +jetty.version=7.2.2.v20101205 # Set to false to NOT start services (zookeeper, lookup server, class server, etc). # When false, tests which depend on those services will not run. (This can also be Modified: branches/BIGDATA_RELEASE_1_0_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/build.xml 2011-09-21 12:29:53 UTC (rev 5229) +++ branches/BIGDATA_RELEASE_1_0_0/build.xml 2011-09-21 19:27:13 UTC (rev 5230) @@ -158,9 +158,29 @@ <!-- I set the target to 1.5 to support deployment on non-1.6 JVMs. 
--> <target name="compile" depends="prepare, buildinfo"> <mkdir dir="${build.dir}" /> - <javac destdir="${build.dir}/classes" classpathref="build.classpath" debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" encoding="${javac.encoding}"> + <echo>javac</echo> + <echo> destdir="${build.dir}"</echo> + <echo> fork="yes"</echo> + <echo> memorymaximumsize="1g"</echo> + <echo> debug="yes"</echo> + <echo> debuglevel="${javac.debuglevel}"</echo> + <echo> verbose="${javac.verbose}"</echo> + <echo> encoding="${javac.encoding}"</echo> + <echo> source="${javac.source}"</echo> + <echo> target="${javac.target}"</echo> + <javac classpathref="build.classpath" + destdir="${build.dir}/classes" + fork="yes" + memorymaximumsize="1g" + debug="${javac.debug}" + debuglevel="${javac.debuglevel}" + verbose="${javac.verbose}" + encoding="${javac.encoding}" + source="${javac.source}" + target="${javac.target}" + includeantruntime="false" + > <!-- note: must also specify -bootclasspath and -extdirs when cross-compiling --> - <!-- target="${javac.target}" source="${javac.source}" --> <src path="${bigdata.dir}/bigdata/src/java" /> <src path="${bigdata.dir}/bigdata-jini/src/java" /> <src path="${bigdata.dir}/bigdata-rdf/src/java" /> @@ -196,7 +216,39 @@ <fileset dir="${bigdata.dir}/bigdata-sails/src/resources/sesame-server"> <include name="META-INF/**" /> </fileset> + <!-- Copy WAR resources for the embedded NanoSparqlServer --> + <fileset dir="." includes="bigdata-war/src/**"/> + <!-- Copy the bigdata license. --> + <fileset file="${bigdata.dir}/LICENSE.txt"/> </copy> + <!-- Copy licenses for any project from which have imported something. 
--> + <mkdir dir="${build.dir}/classes/LEGAL"/> + <copy toDir="${build.dir}/classes/LEGAL" flatten="true"> + <fileset file="${bigdata.dir}/bigdata/LEGAL/apache-license-2_0.txt"/> + <fileset file="${bigdata.dir}/bigdata-rdf/LEGAL/sesame2.x-license.txt"/> + </copy> + <!-- Generate a combined NOTICE file for anything which we imported + into the source tree from which the JAR was generated. --> + <concat destfile="${build.dir}/classes/NOTICE" fixlastline="true" overwrite="yes"> + <filterchain> + <fixcrlf/> + </filterchain> + <header filtering="yes" trimleading="yes"> + Combined NOTICE files for bigdata JAR source code. + ================================================== + </header> + <fileset file="${bigdata.dir}/NOTICE"/> + <fileset file="${bigdata.dir}/bigdata/LEGAL/NOTICE"/> + <fileset file="${bigdata.dir}/bigdata-rdf/LEGAL/NOTICE"/> + <fileset file="${bigdata.dir}/bigdata-sails/LEGAL/NOTICE"/> + <fileset file="${bigdata.dir}/bigdata-jini/LEGAL/NOTICE"/> + <footer filtering="yes" trimleading="yes"> + ================================================== + Source code for included Open Source software is available + from the respective websites of the copyright holders of the + included software. + </footer> + </concat> </target> <!-- Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory. --> @@ -211,15 +263,13 @@ <target name="jar" depends="compile" description="Generates the jar (see also bundleJar)."> <jar destfile="${build.dir}/${version}.jar"> <fileset dir="${build.dir}/classes" excludes="test/**" /> - <!-- Copy WAR resources for the embedded NanoSparqlServer --> - <fileset dir="." includes="bigdata-war/src/**"/> <manifest> <!--<attribute name="Main-Class" value="com/bigdata/rdf/rio/TestRioIntegration"/>--> </manifest> </jar> </target> - <!-- This generates an osgi bundle jar, and does not bundled the dependencies. + <!-- This generates an osgi bundle jar, but does not bundle the dependencies. 
See 'bundleJar'. --> <target name="osgi" depends="compile, bundle" description="Generates the osgi bundle jar (see also bundleJar)."> <taskdef resource="aQute/bnd/ant/taskdef.properties" classpath="bigdata/lib/bnd-0.0.384.jar" /> @@ -235,7 +285,6 @@ <attribute name="Bundle-Description" value="Bigdata Source" /> </manifest> <fileset dir="bigdata/src/java" /> - <fileset dir="bigdata/src/java" /> <fileset dir="bigdata-jini/src/java" /> <fileset dir="bigdata-rdf/src/java" /> <fileset dir="bigdata-sails/src/java" /> @@ -251,7 +300,7 @@ <bndwrap jars="${build.dir}/lib/jgrapht-jdk1.5-0.7.1.jar" output="${build.dir}/bundles/jgrapht-jdk1.5-0.7.1.jar" definitions="${basedir}/osgi/" /> <bndwrap jars="${build.dir}/lib/lgpl-utils-1.0.6-020610.jar" output="${build.dir}/bundles/lgpl-utils-1.0.6-020610.jar" definitions="${basedir}/osgi/" /> <bndwrap jars="${build.dir}/lib/high-scale-lib-v1.1.2.jar" output="${build.dir}/bundles/high-scale-lib-v1.1.2.jar" definitions="${basedir}/osgi/" /> - <bndwrap jars="${build.dir}/lib/openrdf-sesame-2.3.0-onejar.jar" output="${build.dir}/bundles/openrdf-sesame-2.3.0.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/openrdf-sesame-${sesame.version}-onejar.jar" output="${build.dir}/bundles/openrdf-sesame-${sesame.version}.jar" definitions="${basedir}/osgi/" /> <bndwrap jars="${build.dir}/lib/apache/zookeeper-${zookeeper.version}.jar" output="${build.dir}/bundles/zookeeper-${zookeeper.version}.jar" definitions="${basedir}/osgi/" /> <bndwrap jars="${build.dir}/lib/nxparser-6-22-2010.jar" output="${build.dir}/bundles/nxparser-2010.6.22.jar" definitions="${basedir}/osgi/" /> </target> @@ -265,28 +314,42 @@ --> <target name="javadoc" depends="prepare" if="javadoc"> <mkdir dir="${build.dir}/docs/api" /> - <javadoc destdir="${build.dir}/docs/api" defaultexcludes="yes" author="true" version="true" use="true" overview="../bigdata/overview.html" windowtitle="bigdata®" classpathref="build.classpath"> + <javadoc 
destdir="${build.dir}/docs/api" defaultexcludes="yes" + author="true" version="true" use="true" + overview="${bigdata.dir}/overview.html" + windowtitle="bigdata®" + classpathref="build.classpath" + > <arg value="-J-Xmx1000m" /> <packageset dir="${bigdata.dir}/bigdata/src/java" /> + <packageset dir="${bigdata.dir}/bigdata/src/samples" /> <packageset dir="${bigdata.dir}/bigdata-jini/src/java" /> <packageset dir="${bigdata.dir}/bigdata-rdf/src/java" /> + <packageset dir="${bigdata.dir}/bigdata-rdf/src/samples" /> <packageset dir="${bigdata.dir}/bigdata-sails/src/java" /> <packageset dir="${bigdata.dir}/bigdata-sails/src/samples" /> <packageset dir="${bigdata.dir}/ctc-striterators/src/java" /> <doctitle> <![CDATA[<h1>bigdata®</h1>]]></doctitle> <bottom> - <![CDATA[<i>Copyright © 2006-2009 SYSTAP, LLC. All Rights Reserved.</i>]]></bottom> + <![CDATA[<i>Copyright © 2006-2011 SYSTAP, LLC. All Rights Reserved.</i>]]></bottom> <tag name="todo" scope="all" description="TODO:" /> <tag name="issue" scope="all" description="ISSUE:" /> <!--tag name="FIXME" scope="all" description="FIXME:"/--> - <link href="http://java.sun.com/j2se/1.5.0/docs/api/" /> - <link href="http://openrdf.org/doc/sesame/api/" /> + <link href="http://download.oracle.com/javase/6/docs/api/" /> + <link href="http://www.openrdf.org/doc/sesame2/api/" /> + <link href="http://lucene.apache.org/java/3_0_0/api/"/> </javadoc> </target> <target name="bundle" description="Bundles all dependencies for easier deployments and releases (does not bundle the bigdata jar)."> - <copy toDir="${build.dir}/lib" flatten="true"> + <!-- Copy all of the LEGAL directories. Do not flatten to avoid collisions. 
--> + <copy toDir="${build.dir}/LEGAL" flatten="false"> + <fileset dir="${bigdata.dir}"> + <include name="**/LEGAL/*" /> + </fileset> + </copy> + <copy toDir="${build.dir}/lib" flatten="true"> <fileset dir="${bigdata.dir}/bigdata/lib"> <include name="**/*.jar" /> <include name="**/*.so" /> @@ -314,6 +377,59 @@ </copy> </target> +<!-- + Note: you can not filter the files in the source jars using + zipgroupfileset. And you can not specify a wildcard of source files + using zipfileset. This leaves you with the choice of either creating + an uberjar and the filtering out the notice files or filtering out + the notice files for each source jar explicitly. + --> +<target name="gather-copyright-notice-files" + description="Locate combine all NOTICE(.txt) files for dependencies."> + <!-- Build an uber 'notice.jar' file from which we can later extract the + individual NOTICE files. This excludes the bigdata JAR to avoid + recursively including the aggregated NOTICE file in that JAR. --> + <jar destfile="${build.dir}/notice.jar"> + <zipgroupfileset dir="${build.dir}/lib" + includes="*.jar" + excludes="bigdata-${version}.jar" + /> + </jar> + <!-- Generate a combined NOTICE file. --> + <concat destfile="${build.dir}/NOTICE" fixlastline="true" overwrite="yes"> + <filterchain> + <fixcrlf/> + </filterchain> + <header filtering="yes" trimleading="yes"> + Combined NOTICE files for bigdata WAR. + ====================================== + </header> + <!-- NOTICE files for modules used to generate the bigdata JAR. --> + <fileset file="${bigdata.dir}/NOTICE"/> + <fileset file="${bigdata.dir}/bigdata/LEGAL/NOTICE"/> + <fileset file="${bigdata.dir}/bigdata-rdf/LEGAL/NOTICE"/> + <fileset file="${bigdata.dir}/bigdata-sails/LEGAL/NOTICE"/> + <fileset file="${bigdata.dir}/bigdata-jini/LEGAL/NOTICE"/> + <!-- Reach into the uber 'notice.jar' and pull out all notic... [truncated message content] |
From: <tho...@us...> - 2011-09-22 12:45:40
|
Revision: 5234 http://bigdata.svn.sourceforge.net/bigdata/?rev=5234&view=rev Author: thompsonbry Date: 2011-09-22 12:45:33 +0000 (Thu, 22 Sep 2011) Log Message: ----------- The SameVariableConstraint was being attached to the AccessPath by an inline TupleFilter subclass. This was causing the SPOAccessPath reference to be dragged in, which resulted in the not serializable exception. A top-level SameVariableConstraintTupleFilter class was created to replace the inline class. The AccessPath constructor was modified to use this top-level class and thus avoid dragging in the AccessPath instance when the filter is serialized. This issue was observed in the development branch (TERMS_REFACTOR_BRANCH). However, it seems likely that the same exception could appear in the 1.0.x release. The changes were applied to both the 1.0.x maintenance branch and the development branch. @see https://sourceforge.net/apps/trac/bigdata/ticket/379 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraintTupleFilter.java branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraintTupleFilter.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java 2011-09-21 20:51:38 UTC (rev 5233) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java 2011-09-22 12:45:33 UTC (rev 5234) @@ -504,12 +504,8 @@ if (indexLocalFilter != null) tmp.addFilter(indexLocalFilter); - tmp.addFilter(new TupleFilter(){ - private 
static final long serialVersionUID = 1L; - @Override - protected boolean isValid(ITuple tuple) { - return sameVarConstraint.isValid(tuple.getObject()); - }}); + tmp.addFilter(new SameVariableConstraintTupleFilter<R>( + sameVarConstraint)); this.indexLocalFilter = tmp; Added: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraintTupleFilter.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraintTupleFilter.java (rev 0) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraintTupleFilter.java 2011-09-22 12:45:33 UTC (rev 5234) @@ -0,0 +1,64 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 22, 2011 + */ + +package com.bigdata.relation.accesspath; + +import java.io.Serializable; + +import com.bigdata.bop.ap.filter.SameVariableConstraint; +import com.bigdata.btree.ITuple; +import com.bigdata.btree.filter.TupleFilter; + +/** + * {@link TupleFilter} class wrapping the {@link SameVariableConstraint}. + * <p> + * Note: This filter can execute local to the index shard in scale-out. 
+ * Therefore it MUST NOT have a reference to the {@link AccessPath} in order to + * be {@link Serializable}. This used to be an "inline" class in + * {@link AccessPath}. It was promoted to a top-level class for this reason. + */ +public class SameVariableConstraintTupleFilter<E> extends TupleFilter<E> { + + private static final long serialVersionUID = 1L; + + private final SameVariableConstraint<E> sameVariableConstraint; + + SameVariableConstraintTupleFilter( + final SameVariableConstraint<E> sameVariableConstraint) { + + this.sameVariableConstraint = sameVariableConstraint; + + } + + @Override + protected boolean isValid(final ITuple tuple) { + + return sameVariableConstraint.isValid(tuple.getObject()); + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraintTupleFilter.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java 2011-09-21 20:51:38 UTC (rev 5233) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java 2011-09-22 12:45:33 UTC (rev 5234) @@ -52,12 +52,10 @@ import com.bigdata.btree.IIndex; import com.bigdata.btree.ILocalBTreeView; import com.bigdata.btree.IRangeQuery; -import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleIterator; import com.bigdata.btree.IndexSegment; import com.bigdata.btree.Tuple; import com.bigdata.btree.UnisolatedReadWriteIndex; -import com.bigdata.btree.filter.TupleFilter; import com.bigdata.btree.isolation.IsolatedFusedView; import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.proc.ISimpleIndexProcedure; @@ -504,12 +502,8 @@ if 
(indexLocalFilter != null) tmp.addFilter(indexLocalFilter); - tmp.addFilter(new TupleFilter(){ - private static final long serialVersionUID = 1L; - @Override - protected boolean isValid(ITuple tuple) { - return sameVarConstraint.isValid(tuple.getObject()); - }}); + tmp.addFilter(new SameVariableConstraintTupleFilter<R>( + sameVarConstraint)); this.indexLocalFilter = tmp; @@ -535,7 +529,7 @@ toKey = keyOrder.getToKey(keyBuilder, predicate); } - + public String toString() { return getClass().getName() Added: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraintTupleFilter.java =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraintTupleFilter.java (rev 0) +++ branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraintTupleFilter.java 2011-09-22 12:45:33 UTC (rev 5234) @@ -0,0 +1,64 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 22, 2011 + */ + +package com.bigdata.relation.accesspath; + +import java.io.Serializable; + +import com.bigdata.bop.ap.filter.SameVariableConstraint; +import com.bigdata.btree.ITuple; +import com.bigdata.btree.filter.TupleFilter; + +/** + * {@link TupleFilter} class wrapping the {@link SameVariableConstraint}. + * <p> + * Note: This filter can execute local to the index shard in scale-out. + * Therefore it MUST NOT have a reference to the {@link AccessPath} in order to + * be {@link Serializable}. This used to be an "inline" class in + * {@link AccessPath}. It was promoted to a top-level class for this reason. + */ +public class SameVariableConstraintTupleFilter<E> extends TupleFilter<E> { + + private static final long serialVersionUID = 1L; + + private final SameVariableConstraint<E> sameVariableConstraint; + + SameVariableConstraintTupleFilter( + final SameVariableConstraint<E> sameVariableConstraint) { + + this.sameVariableConstraint = sameVariableConstraint; + + } + + @Override + protected boolean isValid(final ITuple tuple) { + + return sameVariableConstraint.isValid(tuple.getObject()); + + } + +} Property changes on: branches/TERMS_REFACTOR_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/SameVariableConstraintTupleFilter.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-09-22 15:11:32
|
Revision: 5235 http://bigdata.svn.sourceforge.net/bigdata/?rev=5235&view=rev Author: thompsonbry Date: 2011-09-22 15:11:19 +0000 (Thu, 22 Sep 2011) Log Message: ----------- Unbundled LUBM and BSBM entirely. People can go get this stuff themselves. Refactored the wiki pages so they are not comingled with the pages for the cluster installer, etc. Published the modified LUBM version at http://www.systap.com/bigdata/bigdata-lubm.tgz. See http://swat.cse.lehigh.edu/projects/lubm/ for the original LUBM code. See https://sourceforge.net/projects/bsbmtools/ for the BSBM code. build.xml : removed "run-performance-tests" target. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/README.txt branches/BIGDATA_RELEASE_1_0_0/build.xml branches/TERMS_REFACTOR_BRANCH/bigdata-perf/bsbm3/README.txt branches/TERMS_REFACTOR_BRANCH/bigdata-perf/bsbm3/build.xml branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/README.txt branches/TERMS_REFACTOR_BRANCH/build.xml Removed Paths: ------------- branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/bsbm/ branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/bsbm3/bsbmtools/ branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/LEGAL/ branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/RWStore.properties branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/WORMStore.properties branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/build.properties branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/build.xml branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/lib/ branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/src/ branches/TERMS_REFACTOR_BRANCH/bigdata-perf/bsbm/ branches/TERMS_REFACTOR_BRANCH/bigdata-perf/bsbm3/bsbmtools/ branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/LEGAL/ branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/RWStore.properties branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/WORMStore.properties branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/build.properties branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/build.xml 
branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/lib/ branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/src/ Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/README.txt =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/README.txt 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/README.txt 2011-09-22 15:11:19 UTC (rev 5235) @@ -1,2 +1,11 @@ -This module contains an integration with LUBM (the Lehigh University -Benchmark). \ No newline at end of file +The LUBM benchmark can be downloaded from [1]. Directions on its use are +available from the project home page. You can download a modified version of +the LUBM benchmark which can make it a bit easier to use with bigdata from [2]. +Please contact the project maintainers if you have questions about this modified +version of the LUBM benchmark. See [3] and [4] for information about running +LUBM with bigdata. + +[1] http://swat.cse.lehigh.edu/projects/lubm/ +[2] http://www.systap.com/bigdata/bigdata-lubm.tgz +[3] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=LUBM +[4] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=LUBM_Cluster Deleted: branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/RWStore.properties =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/RWStore.properties 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/RWStore.properties 2011-09-22 15:11:19 UTC (rev 5235) @@ -1,155 +0,0 @@ -# -# Note: These options are applied when the journal and the triple store are -# created. If you want to modify options after than you must do so using -# [ant set-properties] or by overriding appropriate properties on the command -# line. - -## -## Journal options. -## - -# Disk is the worm store. DiskRW is the read/write store. 
-com.bigdata.journal.AbstractJournal.bufferMode=DiskRW -com.bigdata.btree.writeRetentionQueue.capacity=8000 -com.bigdata.btree.BTree.branchingFactor=128 - -# RWStore options. -#com.bigdata.rwstore.RWStore.allocationSizes=1, 2, 3, 5, 8, 12, 16, 32, 48, 64, 128 -#com.bigdata.rwstore.RWStore.allocationSizes=1, 2, 3, 5, 8, 12, 16, 32, 48, 64, 128, 3520 -#com.bigdata.rwstore.RWStore.allocationSizes=1, 2, 3, 5, 8, 12, 16, 32, 48, 64, 128, 192, 320, 512, 832, 1344, 2176, 3520 - -# -# Overrides for various indices. -# - -# U50 4k pages. -com.bigdata.namespace.LUBM_U50.lex.TERM2ID.com.bigdata.btree.BTree.branchingFactor=270 -com.bigdata.namespace.LUBM_U50.lex.ID2TERM.com.bigdata.btree.BTree.branchingFactor=120 -com.bigdata.namespace.LUBM_U50.spo.POS.com.bigdata.btree.BTree.branchingFactor=970 -com.bigdata.namespace.LUBM_U50.spo.SPO.com.bigdata.btree.BTree.branchingFactor=512 -com.bigdata.namespace.LUBM_U50.spo.OSP.com.bigdata.btree.BTree.branchingFactor=470 - -# U1000 4k -com.bigdata.namespace.LUBM_U1000.lex.TERM2ID.com.bigdata.btree.BTree.branchingFactor=270 -com.bigdata.namespace.LUBM_U1000.lex.ID2TERM.com.bigdata.btree.BTree.branchingFactor=120 -com.bigdata.namespace.LUBM_U1000.spo.POS.com.bigdata.btree.BTree.branchingFactor=970 -com.bigdata.namespace.LUBM_U1000.spo.SPO.com.bigdata.btree.BTree.branchingFactor=512 -com.bigdata.namespace.LUBM_U1000.spo.OSP.com.bigdata.btree.BTree.branchingFactor=470 - -# U50 8k pages. -#com.bigdata.namespace.LUBM_U50.lex.TERM2ID.com.bigdata.btree.BTree.branchingFactor=540 -#com.bigdata.namespace.LUBM_U50.lex.ID2TERM.com.bigdata.btree.BTree.branchingFactor=240 -#com.bigdata.namespace.LUBM_U50.spo.POS.com.bigdata.btree.BTree.branchingFactor=1940 -#com.bigdata.namespace.LUBM_U50.spo.SPO.com.bigdata.btree.BTree.branchingFactor=1024 -#com.bigdata.namespace.LUBM_U50.spo.OSP.com.bigdata.btree.BTree.branchingFactor=940 - -# Override the #of write cache buffers. 
-#com.bigdata.journal.AbstractJournal.writeCacheBufferCount=12 - -# 200M initial extent. -com.bigdata.journal.AbstractJournal.initialExtent=209715200 -com.bigdata.journal.AbstractJournal.maximumExtent=209715200 - -## -## Triple store options. -## - -com.bigdata.rdf.store.AbstractTripleStore.quads=false -com.bigdata.rdf.store.AbstractTripleStore.statementIdentifiers=false -com.bigdata.rdf.store.AbstractTripleStore.textIndex=false - -# 50000 is the default. -#com.bigdata.rdf.store.AbstractTripleStore.termCache.capacity=50000 - -# When "true", the store will perform incremental closure as the data -# are loaded. When "false", the closure will be computed after all data -# are loaded. (Actually, since we are not loading through the SAIL -# making this true does not cause incremental TM but it does disable -# closure, so "false" is what you need here). -com.bigdata.rdf.sail.truthMaintenance=false - -# -# Option to restrict ourselves to RDFS only inference. This condition -# may be compared readily to many other stores. -# -# Note: While we can turn on some kinds of owl processing (e.g., -# TransitiveProperty, see below), we can not compute all the necessary -# entailments (only queries 11 and 13 benefit). -# -# Note: There are no owl:sameAs assertions in LUBM. -# -# Note: lubm query does not benefit from owl:inverseOf. -# -# Note: lubm query does benefit from owl:TransitiveProperty (queries 11 -# and 13). -# -# Note: owl:Restriction (which we can not compute) plus -# owl:TransitiveProperty is required to get all the answers for LUBM. -# -# @todo disable the backchainer for LDS, EDS, JDS. -# -#com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.NoAxioms -com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.RdfsAxioms - -# 10000 is default. -#com.bigdata.rdf.sail.bufferCapacity=10000 - -# Produce a full closure (all entailments) so that the backward chainer -# is always a NOP. 
Note that the configuration properties are stored in -# the database (in the global row store) so you always get exactly the -# same configuration that you created when reopening a triple store. - -# properties.setProperty(Options.FORWARD_CHAIN_RDF_TYPE_RDFS_RESOURCE, "true"); -# properties.setProperty(Options.FORWARD_CHAIN_OWL_SAMEAS_PROPERTIES, "true"); - -# Additional owl inferences. LUBM only both inverseOf and -# TransitiveProperty of those that we support (owl:sameAs, -# owl:inverseOf, owl:TransitiveProperty), but not owl:sameAs. -com.bigdata.rdf.rules.InferenceEngine.forwardChainOwlInverseOf=true -com.bigdata.rdf.rules.InferenceEngine.forwardChainOwlTransitiveProperty=true - -# Note: FastClosure is the default. -#com.bigdata.rdf.store.AbstractTripleStore.closureClass=com.bigdata.rdf.rules.FastClosure -#com.bigdata.rdf.store.AbstractTripleStore.closureClass=com.bigdata.rdf.rules.FullClosure - -# Turn off incremental closure in the DataLoader object. -com.bigdata.rdf.store.DataLoader.closure=None - -# Turn off commit in the DataLoader object. We do not need to commit -# anything until we have loaded all the data and computed the closure -# over the database. -com.bigdata.rdf.store.DataLoader.commit=None - -# Turn off Unicode support for index keys (this is a big win for load -# rates since LUBM does not use Unicode data, but it has very little -# effect on query rates since the only time we generate Unicode sort -# keys is when resolving the Values in the queries to term identifiers -# in the database). -com.bigdata.btree.keys.KeyBuilder.collator=ASCII - -# Turn on bloom filter for the SPO index (good up to ~2M index entries -# for scale-up -or- for any size index for scale-out). -com.bigdata.rdf.store.AbstractTripleStore.bloomFilter=true - -# Turn off justifications (impacts only the load performance, but -# it is a big impact and only required if you will be doing TM). 
-com.bigdata.rdf.store.AbstractTripleStore.justify=false - -# Maximum #of subqueries to evaluate concurrently for the 1st join -# dimension for native rules. Zero disables the use of an executor -# service. One forces a single thread, but runs the subquery on the -# executor service. N>1 is concurrent subquery evaluation. -#com.bigdata.relation.rule.eval.maxParallelSubqueries=5 -com.bigdata.relation.rule.eval.maxParallelSubqueries=0 - -# May be used to turn off query-time expansion of entailments such as -# (x rdf:type rdfs:Resource) and owl:sameAs even through those -# entailments were not materialized during forward closure. (This -# property is interpreted by the BigdataSail). -com.bigdata.rdf.sail.queryTimeExpander=false - -# Note: LUBM uses blank nodes. Therefore re-loading LUBM will always -# cause new statements to be asserted and result in the closure being -# updated if it is recomputed. You can work around this using this -# property. -#com.bigdata.rdf.store.AbstractTripleStore.storeBlankNodes=true Deleted: branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/WORMStore.properties =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/WORMStore.properties 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/WORMStore.properties 2011-09-22 15:11:19 UTC (rev 5235) @@ -1,121 +0,0 @@ -# -# Note: These options are applied when the journal and the triple store are -# created. If you want to modify options after than you must do so using -# [ant set-properties] or by overriding appropriate properties on the command -# line. - -## -## Journal options. -## - -# Disk is the worm store. DiskRW is the read/write store. -com.bigdata.journal.AbstractJournal.bufferMode=DiskWORM -com.bigdata.btree.writeRetentionQueue.capacity=8000 - -# 200M initial extent. 
-com.bigdata.journal.AbstractJournal.initialExtent=209715200 -com.bigdata.journal.AbstractJournal.maximumExtent=209715200 - -## -## Triple store options. -## - -com.bigdata.rdf.store.AbstractTripleStore.quads=false -com.bigdata.rdf.store.AbstractTripleStore.statementIdentifiers=false -com.bigdata.rdf.store.AbstractTripleStore.textIndex=false - -# 50000 is the default. -#com.bigdata.rdf.store.AbstractTripleStore.termCache.capacity=50000 - -# When "true", the store will perform incremental closure as the data -# are loaded. When "false", the closure will be computed after all data -# are loaded. (Actually, since we are not loading through the SAIL -# making this true does not cause incremental TM but it does disable -# closure, so "false" is what you need here). -com.bigdata.rdf.sail.truthMaintenance=false - -# -# Option to restrict ourselves to RDFS only inference. This condition -# may be compared readily to many other stores. -# -# Note: While we can turn on some kinds of owl processing (e.g., -# TransitiveProperty, see below), we can not compute all the necessary -# entailments (only queries 11 and 13 benefit). -# -# Note: There are no owl:sameAs assertions in LUBM. -# -# Note: lubm query does not benefit from owl:inverseOf. -# -# Note: lubm query does benefit from owl:TransitiveProperty (queries 11 -# and 13). -# -# Note: owl:Restriction (which we can not compute) plus -# owl:TransitiveProperty is required to get all the answers for LUBM. -# -# @todo disable the backchainer for LDS, EDS, JDS. -# -#com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.NoAxioms -com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.RdfsAxioms - -# 10000 is default. -com.bigdata.rdf.sail.bufferCapacity=100000 - -# Produce a full closure (all entailments) so that the backward chainer -# is always a NOP. 
Note that the configuration properties are stored in -# the database (in the global row store) so you always get exactly the -# same configuration that you created when reopening a triple store. - -# properties.setProperty(Options.FORWARD_CHAIN_RDF_TYPE_RDFS_RESOURCE, "true"); -# properties.setProperty(Options.FORWARD_CHAIN_OWL_SAMEAS_PROPERTIES, "true"); - -# Additional owl inferences. LUBM only both inverseOf and -# TransitiveProperty of those that we support (owl:sameAs, -# owl:inverseOf, owl:TransitiveProperty), but not owl:sameAs. -com.bigdata.rdf.rules.InferenceEngine.forwardChainOwlInverseOf=true -com.bigdata.rdf.rules.InferenceEngine.forwardChainOwlTransitiveProperty=true - -# Note: FastClosure is the default. -#com.bigdata.rdf.store.AbstractTripleStore.closureClass=com.bigdata.rdf.rules.FastClosure -#com.bigdata.rdf.store.AbstractTripleStore.closureClass=com.bigdata.rdf.rules.FullClosure - -# Turn off incremental closure in the DataLoader object. -com.bigdata.rdf.store.DataLoader.closure=None - -# Turn off commit in the DataLoader object. We do not need to commit -# anything until we have loaded all the data and computed the closure -# over the database. -com.bigdata.rdf.store.DataLoader.commit=None - -# Turn off Unicode support for index keys (this is a big win for load -# rates since LUBM does not use Unicode data, but it has very little -# effect on query rates since the only time we generate Unicode sort -# keys is when resolving the Values in the queries to term identifiers -# in the database). -com.bigdata.btree.keys.KeyBuilder.collator=ASCII - -# Turn on bloom filter for the SPO index (good up to ~2M index entries -# for scale-up -or- for any size index for scale-out). -com.bigdata.rdf.store.AbstractTripleStore.bloomFilter=true - -# Turn off justifications (impacts only the load performance, but -# it is a big impact and only required if you will be doing TM). 
-com.bigdata.rdf.store.AbstractTripleStore.justify=false - -# Maximum #of subqueries to evaluate concurrently for the 1st join -# dimension for native rules. Zero disables the use of an executor -# service. One forces a single thread, but runs the subquery on the -# executor service. N>1 is concurrent subquery evaluation. -#com.bigdata.relation.rule.eval.maxParallelSubqueries=5 -com.bigdata.relation.rule.eval.maxParallelSubqueries=0 - -# May be used to turn off query-time expansion of entailments such as -# (x rdf:type rdfs:Resource) and owl:sameAs even through those -# entailments were not materialized during forward closure. (This -# property is interpreted by the BigdataSail). -com.bigdata.rdf.sail.queryTimeExpander=false - -# Note: LUBM uses blank nodes. Therefore re-loading LUBM will always -# cause new statements to be asserted and result in the closure being -# updated if it is recomputed. You can work around this using this -# property. -#com.bigdata.rdf.store.AbstractTripleStore.storeBlankNodes=true Deleted: branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/build.properties 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/build.properties 2011-09-22 15:11:19 UTC (rev 5235) @@ -1,140 +0,0 @@ -# ant build properties. - -# the base directory -lubm.dir=. -# the bigdata base directory -bigdata.dir=../.. - -# Where the generated files will be written. -build.dir=ant-build -# Where to find the pre-built bigdata classes. -bigdata.build.dir=${bigdata.dir}/ant-build - -bigdata.install.lib.dir=${bigdata.dir}/ - -## -# javac options -## - -# debug=on|off -javac.debug=on -# debuglevel=lines,vars,source (or any combination thereof). -javac.debuglevel=lines,vars,source -javac.verbose=off -#javac.target=1.6 -#javac.source=1.6 -javac.encoding=Cp1252 - -# lubm properties. 
-# -# Note: By default, the files will wind up in ./ant-build/bin - -# The port at which the NanoSparqlServer will respond (if started). -# -# Note: You MUST also edit src/resources/config/config.kb.sparql to -# change the port against which the queries will be issued. -# -lubm.nanoServerPort=80 - -# The maximum size of the java heap for the LUBM test runs. -lubm.maxMem=4g - -# The data set size (U50, U1000, etc.) -lubm.univ=50 - -# The namespace of the KB instance (multiple KBs can be in the same database). -lubm.namespace=LUBM_U${lubm.univ} - -# Laptop benchmark data directory. -#lubm.baseDir=d:/bigdata-perf-analysis/lubm/U${lubm.univ} -# Server benchmark directory. -lubm.baseDir=/nas/data/lubm/U${lubm.univ} -# Windows Server 2008 benchmark data directory. -#lubm.baseDir=c:/usr/local/data/lubm/lubm_${lubm.univ} - -## Where to put the XML results files. -#bsbm.resultsDir=${bsbm.baseDir}/.. - -# The ontology file. -lubm.ontologyFile=src/resources/config/univ-bench.owl - -# The directory in which the generator writes its data. -lubm.dataDir=${lubm.baseDir}/data - -# Generate ntriples. -lubm.outputType=nt - -# Specify "GZip", "Zip", or "None" -lubm.compressType=GZip - -# Which mode to use for the Journal. (DiskRW or DiskWORM) -journalMode=RW -#journalMode=WORM - -## The name of the directory containing the generated RDF data without the filename extension. -#lubm.outputFile=${lubm.baseDir} - -# The name of the file used to configure the journal. -lubm.journalPropertyFile=${journalMode}Store.properties - -# The name of the file used for the journal. -#lubm.journalFile=${lubm.baseDir}/bigdata-lubm.${journalMode}.jnl -# Note: This is on the large volume. -lubm.journalFile=/data/lubm/U${lubm.univ}/bigdata-lubm.${journalMode}.jnl -# SSD. -#lubm.journalFile=e:/data/lubm/U${lubm.univ}/bigdata-lubm.${journalMode}.jnl -# SAS -#lubm.journalFile=f:/data/lubm/U${lubm.univ}/bigdata-lubm.${journalMode}.jnl - -# The database to test. 
-lubm.configFile=src/resources/config/config.kb.sparql - -# The queries to run. -lubm.queryFile=src/resources/config/config.query.sparql -#lubm.queryFile=config.query9.sparql - -# -# Query parameters. -# - -# -# Profiler parameters. -# - -# No profiler. -profilerAgent= -# linux-64 -#profilerAgent=-agentpath:/usr/java/yjp-9.0.3/bin/linux-x86-64/libyjpagent.so -# Windows -#profilerAgent="-agentpath:C:/Program Files/YourKit Java Profiler 9.0.1/bin/win32/yjpagent.dll" - -# No profiler. -profilerAgentOptions= -# all profiling initially disabled. -#profilerAgentOptions=-agentlib:yjpagent=disableexceptiontelemetry,disablestacktelemetry - -profiler=${profilerAgent} ${profilerAgentOptions} - -# Configure GC. -gcopts= -#gcopts=-verbose:gc -#gcopts=-XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -gcopts=-XX:+UseParallelOldGC - -# Generates detailed logging on the JVM GC behavior. The service will -# start in the configured service directory, so the log file will be in -# that directory as well. The service directory is typically on local -# disk, so that is where you need to look for this file. -gcdebug= -#gcdebug=-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:jvm_gc.log - -# The record cache (empty for the default cache). -cache= -#cache=-Dcom.bigdata.LRUNexus.enabled=false -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.LRUNexus.threadLocalBuffers=true -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.LRUNexus.limitingCapacity=2000000 -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler - -# all jvm args for query. 
-queryJvmArgs=-server -Xmx${lubm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=file:src/resources/logging/log4j.properties Deleted: branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/build.xml 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata-perf/lubm/build.xml 2011-09-22 15:11:19 UTC (rev 5235) @@ -1,132 +0,0 @@ -<!-- $Id: build.xml 2266 2009-10-26 18:21:50Z mrpersonick $ --> -<!-- --> -<!-- do "ant bundle-jar" in the parent directory first. --> -<!-- --> -<project name="lubm" default="compile" basedir="."> - - <property file="build.properties" /> - - <!-- build-time classpath. --> - <path id="build.classpath"> - <fileset dir="${lubm.dir}/lib"> - <include name="**/*.jar" /> - </fileset> - <!-- The bigdata dependencies (for the nano-server and the Sesame jars). --> - <fileset dir="${bigdata.build.dir}/lib"> - <include name="**/*.jar" /> - </fileset> - </path> - - <!-- runtime classpath w/o install. --> - <path id="runtime.classpath"> - <!-- The compiled LUBM classes. --> - <pathelement location="${build.dir}/classes" /> - <!-- The LUBM dependencies. --> - <fileset dir="${lubm.dir}/lib"> - <include name="**/*.jar" /> - </fileset> - <!-- The bigdata dependencies (for the nano-server and the Sesame jars). --> - <fileset dir="${bigdata.build.dir}/lib"> - <include name="**/*.jar" /> - </fileset> - <path refid="build.classpath" /> - </path> - - <target name="clean" description="cleans everything in [build.dir]"> - <delete dir="${build.dir}" /> - </target> - - <target name="prepare"> - <!-- create directories. 
--> - <mkdir dir="${build.dir}" /> - <mkdir dir="${build.dir}/classes" /> - <mkdir dir="${build.dir}/bin" /> - </target> - - <target name="compile" depends="prepare" description="Compile the benchmark."> - <javac destdir="${build.dir}/classes" classpathref="build.classpath" debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" encoding="${javac.encoding}"> - <src path="${lubm.dir}/src/java" /> - <compilerarg value="-version" /> - </javac> - <!-- copy resources. --> - <copy toDir="${build.dir}/classes"> - <fileset dir="${lubm.dir}/src/java"> - <exclude name="**/*.java" /> - <exclude name="**/package.html" /> - </fileset> - </copy> - <copy toDir="${build.dir}/bin"> - <!-- copy benchmark data and queries. --> - <fileset dir="${lubm.dir}/src/resources/config" /> - <!-- copy the journal configuration file. --> - <fileset file="${lubm.dir}/*.properties" /> - <!-- copy log4j configuration file. --> - <fileset dir="${lubm.dir}/src/resources/logging" /> - </copy> - </target> - - <target name="run-generator" depends="compile"> - <mkdir dir="${lubm.baseDir}" /> - <mkdir dir="${lubm.dataDir}" /> - <java classname="edu.lehigh.swat.bench.uba.Generator" fork="true" failonerror="true" dir="${lubm.dataDir}"> - <arg value="-subdirs" /> - <arg value="-univ" /> - <arg value="${lubm.univ}" /> - <arg value="-onto" /> - <arg value="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl" /> - <arg value="-compress" /> - <arg value="${lubm.compressType}" /> - <jvmarg value="-Xmx400m" /> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - - <!-- Note: split data files and use RDFDataLoadMaster for scale-out. --> - <target name="run-load" depends="compile"> - <!-- delete file if it exists so we load into a new journal. 
--> - <delete verbose="true" file="${lubm.journalFile}" /> - <java classname="com.bigdata.rdf.store.DataLoader" fork="true" failonerror="true" ><!--dir="${build.dir}/bin"--> - <arg line="-closure -namespace ${lubm.namespace} ${lubm.journalPropertyFile} ${lubm.ontologyFile} ${lubm.dataDir}" /> - <!-- specify/override the journal file name. --> - <jvmarg line="${queryJvmArgs} -Dcom.bigdata.journal.AbstractJournal.file=${lubm.journalFile} - -Dcom.bigdata.io.DirectBufferPool.bufferCapacity=10485760 - " /> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - - <target name="start-nano-server" depends="compile" description="Start a small http server fronting for a bigdata database instance."> - <echo message="propertyFile=${lubm.journalPropertyFile}"/> - <echo message="journalFile=${lubm.journalFile}"/> - <echo message="namespace=${lubm.namespace}"/> - <echo message="port=${lubm.nanoServerPort}"/> - <java classname="com.bigdata.rdf.sail.webapp.NanoSparqlServer" fork="true" failonerror="true"><!-- dir="${build.dir}/bin"--> - <arg line="${lubm.nanoServerPort} ${lubm.namespace} ${lubm.journalPropertyFile}" /> - <!-- specify/override the journal file name. 
--> - <jvmarg line="${queryJvmArgs} -Dcom.bigdata.journal.AbstractJournal.file=${lubm.journalFile}" /> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - - <target name="run-query" depends="compile" description="Runs the benchmark queries against the loaded data."> - <java classname="edu.lehigh.swat.bench.ubt.Test" fork="true" failonerror="true" ><!--dir="${build.dir}/bin"--> - <jvmarg value="-Dlubm.warmUp=false" /> - <jvmarg value="-Dlubm.queryTime=10" /> - <jvmarg value="-Dlubm.queryParallel=1" /> - <jvmarg value="-Dlog4j.configuration=file:src/resources/logging/log4j.properties"/> - <arg value="query" /> - <arg value="${lubm.configFile}" /> - <arg value="${lubm.queryFile}" /> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - -</project> Modified: branches/BIGDATA_RELEASE_1_0_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/build.xml 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/BIGDATA_RELEASE_1_0_0/build.xml 2011-09-22 15:11:19 UTC (rev 5235) @@ -2189,100 +2189,6 @@ </target> <!-- --> -<!-- PERFORMANCE TESTS --> -<!-- --> - -<target name="run-performance-tests" depends="testCompile" description="Runs a variety of performance tests."> -<!-- Note: This depends on the stage target. 
--> -<path id="run.class.path.id"> - <pathelement location="${junit.jar}" /> - <pathelement location="${bigdata-test.jar}" /> - <pathelement location="${cweb-junit-ext.jar}" /> - <pathelement location="${sesame-sparql-test.jar}" /> - <pathelement location="${sesame-store-test.jar}" /> - <pathelement location="${dist.lib}/bigdata.jar" /> - <pathelement location="${dist.lib}/colt.jar" /> - <pathelement location="${dist.lib}/cweb-commons.jar" /> - <pathelement location="${dist.lib}/cweb-extser.jar" /> - <pathelement location="${dist.lib}/highscalelib.jar" /> - <pathelement location="${dist.lib}/dsiutils.jar" /> - <pathelement location="${dist.lib}/lgplutils.jar" /> - <pathelement location="${dist.lib}/fastutil.jar" /> - <pathelement location="${dist.lib}/icu4j.jar" /> - <pathelement location="${dist.lib}/iris.jar" /> - <pathelement location="${dist.lib}/jgrapht.jar" /> - <pathelement location="${dist.lib}/jsk-lib.jar" /> - <pathelement location="${dist.lib}/jsk-platform.jar" /> - <pathelement location="${dist.lib}/log4j.jar" /> - <pathelement location="${dist.lib}/lucene-analyzer.jar" /> - <pathelement location="${dist.lib}/lucene-core.jar" /> - <pathelement location="${dist.lib}/openrdf-sesame.jar" /> - <pathelement location="${dist.lib}/slf4j.jar" /> - <pathelement location="${dist.lib}/slf4j-log4j.jar" /> - <pathelement location="${dist.lib}/nxparser.jar" /> - <pathelement location="${dist.lib}/zookeeper.jar" /> - <pathelement location="${dist.lib}/jetty-continuation.jar" /> - <pathelement location="${dist.lib}/jetty-http.jar" /> - <pathelement location="${dist.lib}/jetty-io.jar" /> - <pathelement location="${dist.lib}/jetty-server.jar" /> - <pathelement location="${dist.lib}/jetty-util.jar" /> - <pathelement location="${dist.lib}/jetty-webapp.jar" /> - <pathelement location="${dist.lib}/jetty-servlet.jar" /> - <pathelement location="${dist.lib}/jetty-security.jar" /> - <pathelement location="${dist.lib}/jetty-xml.jar" /> - <pathelement 
location="${dist.lib}/servlet-api.jar" /> -</path> -<property name="run.class.path" value="${junit.jar}${path.separator}${bigdata-test.jar}${path.separator}${cweb-junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${dist.lib}/bigdata.jar${path.separator}${dist.lib}/colt.jar${path.separator}${dist.lib}/cweb-commons.jar${path.separator}${dist.lib}/cweb-extser.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/iris.jar${path.separator}${dist.lib}/jgrapht.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/slf4j-log4j.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/servlet-api.jar" /> -<!-- Generate the LUBM dataset. 
- <mkdir dir="${data}"/> - <java classname="edu.lehigh.swat.bench.uba.Generator" dir="${data}" fork="yes"> - <classpath> - <path refid="run.class.path.id"/> - <pathelement location="${build.dir}/lubm/lib/bigdata-lubm.jar"/> - </classpath> - <jvmarg value="-server"/> - <jvmarg value="-Xmx400m"/> - <jvmarg value="-Dlog4j.configuration=file:bigdata/src/resources/logging/log4j-perf-tests.properties"/> - <arg value="-subdirs"/> - <arg value="-compress"/> - <arg value="GZip"/> - <arg value="-univ"/> - <arg value="50"/> - <arg value="-onto"/> - <arg value="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl"/> - </java> - --> -<!-- Delete the generated LUBM data set. - <delete dir="${data}"/> - --> -<!-- Run the data load, closure and query performance tests. --> -<copy file="bigdata/src/resources/logging/log4j-perf-tests.properties" todir="${perf.run.dir}" /> -<copy file="bigdata-sails/src/test/com/bigdata/rdf/stress/testLubm.xml" todir="${perf.run.dir}" /> -<java classname="com.bigdata.test.ExperimentDriver" dir="${perf.run.dir}" fork="yes"> - <classpath refid="run.class.path.id" /> - <jvmarg value="-server" /> - <jvmarg value="-Xmx2g" /> - <!-- Override the temporary directory to the specified run directory. --> - <jvmarg value="-Djava.io.tmpdir=${perf.run.dir}" /> - <jvmarg value="-Dlog4j.configuration=file:log4j-perf-tests.properties" /> - <jvmarg value="-Dontology=${perf.data.dir}/lubm/univ-bench.owl" /> - <jvmarg value="-Ddata=${perf.data.dir}/lubm/U50" /> - <jvmarg value="-Dquery=${perf.data.dir}/lubm/config.query.sparql" /> - <arg value="testLubm.xml" /> -</java> -<!-- Copy the results from the performance run @todo w/ append! - - Consider an after-action to do that and then delete the perf.run.dir. - - <copy file="${perf.run.dir}/com.bigdata.rdf.stress.LoadClosureAndQueryTest.exp.csv" todir="."/> - --> -<!-- Delete the runs directory, but FIRST copy out the results (with append). 
- <copy></copy> - <delete dir=${perf.run.dir}/> - --> -</target> - -<!-- --> <!-- SESAME SERVER TARGETS --> <!-- --> Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-perf/bsbm3/README.txt =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-perf/bsbm3/README.txt 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-perf/bsbm3/README.txt 2011-09-22 15:11:19 UTC (rev 5235) @@ -1,14 +1,20 @@ -This directory contains a setup for running BSBM v3 against bigdata. The main -files are: +This directory contains a setup for running BSBM v3 against bigdata. -- bsbmtools - the bsbm3 source distribution. +In addition to the files in this directory, you will need the bsbmtools +distribution. This is available from +http://www4.wiwiss.fu-berlin.de/bizer/BerlinSPARQLBenchmark. Please consult +bsbmtools and the online documentation for BSBM for current information on +how to generate test data sets and the correct procedure for running the +benchmark. +The files in this directory include: + - build.properties - configuration properties for the ant script. -- build.xml - an ant script which may be used to generate a BSBM data set, load - the data set into a bigdata database instance, start a SPARQL - end point for that database instance, and run the BSBM benchmark - against that SPARQL end point. +- build.xml - an ant script which may be used to load a generated data set + a local bigdata database instance and start a SPARQL + end point for that database instance. You will then run the + benchmark against that SPARQL end point. - RWStore.properties - configuration properties for a bigdata database instance suitable for BSBM and backed by the RW persistence engine @@ -32,9 +38,11 @@ To get started: -1. Edit bigdata-perf/bsbm3/build.properties. +0. Generate a suitable data set. -1. In the top-level directory of the bigdata source tree, review build.properties +2. Edit bigdata-perf/bsbm3/build.properties. 
+ +3. In the top-level directory of the bigdata source tree, review build.properties and then do: a. "ant bundleJar". @@ -42,18 +50,14 @@ Note: You will need to rerun this ant target any time you update the code from SVN or if you make edits to the source tree. -2. Change to the bigdata-perf/bsbm3 directory: +4. Change to the bigdata-perf/bsbm3 directory: - a. "ant run-generator" (generates the BSBM data set). - b. "ant run-load" (loads the generated data set into a bigdata instance). c. "ant start-nano-server" (starts the SPARQL end point). - - d. "ant run-query" (runs the benchmark). -There are a variety of other ant tasks in that directory which may be used to -run load and run the BSBM qualification data set, etc. +5. Follow the procedure for BSBM tools to run the benchmark against the SPARQL + end point. Performance should be extremely good for the reduced query mix, which can be enabled by editing: Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-perf/bsbm3/build.xml =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-perf/bsbm3/build.xml 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-perf/bsbm3/build.xml 2011-09-22 15:11:19 UTC (rev 5235) @@ -1,27 +1,12 @@ - <!-- $Id$ --> <!-- --> <!-- do "ant bundle-jar" in the parent directory first. --> <!-- --> -<project name="bsbm" default="compile" basedir="."> +<project name="bsbm" basedir="."> <property file="build.properties" /> - <!-- build-time classpath. --> - <path id="build.classpath"> - <fileset dir="${bsbmtools.dir}/lib"> - <include name="**/*.jar" /> - </fileset> - </path> - - <!-- runtime classpath w/o install. --> <path id="runtime.classpath"> - <!-- The compiled BSBM classes. - <pathelement location="${build.dir}/classes" /> --> - <!-- The BSBM dependencies. --> - <fileset dir="${bsbmtools.dir}/lib"> - <include name="**/*.jar" /> - </fileset> <!-- The bigdata dependencies (for the nano-server). 
--> <fileset dir="${bigdata.build.dir}/lib"> <include name="**/*.jar" /> @@ -31,112 +16,20 @@ <target name="clean" description="cleans everything in [build.dir]"> <delete dir="${build.dir}" /> - <ant dir="${bsbmtools.dir}" antfile="build.xml" target="clean"/> </target> <target name="prepare"> <!-- create directories. --> <mkdir dir="${build.dir}" /> - <!-- - <mkdir dir="${build.dir}/classes" /> - <mkdir dir="${build.dir}/bin" /> --> <copy toDir="${build.dir}/bin"> <!-- copy logging and journal configuration file. --> <fileset file="${bsbm.dir}/*.properties" /> </copy> </target> - <target name="compile" depends="prepare" description="Compile the benchmark."> - <ant dir="${bsbmtools.dir}" antfile="build.xml" target="build"/> - <!-- - <javac destdir="${build.dir}/classes" classpathref="build.classpath" debug="${javac.debug}" debuglevel="${javac.debuglevel}" verbose="${javac.verbose}" encoding="${javac.encoding}"> - <src path="${bsbm.dir}/src/java" /> - <compilerarg value="-version" /> - </javac> --> - </target> - - <!-- - Here is how to qualify the system. - - You have to download the qualification dataset [1] (it's 20M, unzip it as - dataset_1m.ttl), its test driver data [2] (unzip this), and the correct - results [3], and put them in the ${bsbm.qualDataDir}. Then follow the - instructions in [4], which boils down to three ant tasks: - - ant run-load-qualification (loads the dataset) - ant run-qualification-1 (runs the queries) - ant run-qualification-2 (compares the actual query results to the correct query results) - - Also, note that src/resources/bsbm-data/ignoreQueries.txt file MUST be empty - when you run the qualifications queries. 
- - [1] http://www4.wiwiss.fu-berlin.de/bizer/BerlinSPARQLBenchmark/datasets/qualification.ttl.gz - [2] http://www4.wiwiss.fu-berlin.de/bizer/BerlinSPARQLBenchmark/datasets/td_data_q.zip - [3] http://www4.wiwiss.fu-berlin.de/bizer/BerlinSPARQLBenchmark/code/correct.qual - [3] http://www4.wiwiss.fu-berlin.de/bizer/BerlinSPARQLBenchmark/spec/index.html#qualification - - --> - - <target name="run-load-qualification" depends="compile" description="Load the qualification data set."> - <delete file="${bsbm.qualJournal}" /> - <java classname="com.bigdata.rdf.store.DataLoader" fork="true" failonerror="true" dir="${build.dir}/bin"> - <arg line="-namespace ${bsbm.qualNamespace} ${bsbm.journalPropertyFile} ${bsbm.qualDataDir}/dataset_1m.ttl" /> - <!-- specify/override the journal file name. --> - <jvmarg line="${queryJvmArgs} -Dcom.bigdata.journal.AbstractJournal.file=${bsbm.qualJournal}" /> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - - <target name="run-qualification-1" depends="compile" description="Run the qualification queries."> - <java classname="benchmark.testdriver.TestDriver" fork="true" failonerror="true" dir="${build.dir}/bin"> - <arg line="-idir ${bsbm.qualDataDir}/td_data -q http://localhost:${bsbm.nanoServerPort}/" /> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - - <target name="run-qualification-2" depends="compile" description="Compare qualification query run against ground truth."> - <java classname="benchmark.qualification.Qualification" fork="true" failonerror="true" dir="${build.dir}/bin"> - <arg line="${bsbm.qualDataDir}/correct.qual run.qual" /> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - - <!-- @todo modify to support gzip and split output files for large runs. 
--> - <target name="run-generator" depends="compile"> - <echo message="bsbm.pc=${bsbm.pc}"/> - <echo message="bsbm.dataDir=${bsbm.dataDir}"/> - <echo message="bsbm.outputFile=${bsbm.outputFile}"/> - <echo message="bsbm.outputType=${bsbm.outputType}"/> - <mkdir dir="${bsbm.baseDir}" /> - <java classname="benchmark.generator.Generator" fork="true" failonerror="true" dir="${bsbmtools.dir}"> - <!-- -fc causes the generator to forward chain the test data. --> - <!-- -pc # specifies the #of products. --> - <!-- -s specifies the output type, generally 'nt' for ntriples. --> - <!-- -fn specifies the output file w/o the .nt extension. --> - <arg value="-fc" /> - <arg value="-pc" /> - <arg value="${bsbm.pc}" /> - <arg value="-dir" /> - <arg value="${bsbm.dataDir}" /> - <arg value="-s" /> - <arg value="${bsbm.outputType}" /> - <arg value="-fn" /> - <arg value="${bsbm.outputFile}" /> - <jvmarg value="-Xmx${bsbm.maxMem}" /> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - <!-- Note: split data files and use RDFDataLoadMaster for scale-out. --> - <target name="run-load" depends="prepare" description="Load a data set."> + <target name="run-load" depends="prepare" + description="Load a data set."> <!-- delete file if it exists so we load into a new journal. 
--> <delete file="${bsbm.journalFile}" /> <java classname="com.bigdata.rdf.store.DataLoader" fork="true" failonerror="true" dir="${build.dir}/bin"> @@ -152,7 +45,8 @@ </java> </target> - <target name="start-sparql-server" depends="compile" description="Start a small http server fronting for a bigdata database instance."> + <target name="start-sparql-server" depends="prepare" + description="Start a small http server fronting for a bigdata database instance."> <java classname="com.bigdata.rdf.sail.webapp.NanoSparqlServer" fork="true" failonerror="true" dir="${build.dir}/bin"> <arg line="${bsbm.nanoServerPort} ${bsbm.namespace} ${bsbm.journalPropertyFile}" /> <!-- specify/override the journal file name. --> @@ -162,79 +56,5 @@ </classpath> </java> </target> - - <target name="run-sparql-query" depends="prepare" description="Run a single query read from a file.."> - <java classname="com.bigdata.rdf.sail.webapp.NanoSparqlClient" fork="true" failonerror="true"> - <arg line="-f query5-instance01-keyRangeVersion.sparql http://localhost:${bsbm.nanoServerPort}/sparql/" /> - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - - <target name="rampup" depends="compile" description="Runs the benchmark queries against the loaded data until system performance reaches a steady state as defined by the benchmark."> - <java classname="benchmark.testdriver.TestDriver" fork="true" failonerror="true" dir="${bsbmtools.dir}"> - - <arg value="-rampup" /> - - <!-- -idir dir is the test data directory (default td_data). --> - <arg value="-idir" /> - <arg value="${bsbm.dataDir}" /> - - <!-- The randomizer seed. --> - <arg value="-seed" /> - <arg value="random" /> - <!--<arg value="${bsbm.seed}"/>--> - - <!-- -o file is the name of the xml output file. --> - <arg value="-o" /> - <arg value="${bsbm.resultsDir}/benchmark_result_pc${bsbm.pc}_runs${bsbm.runs}_mt${bsbm.mt}.xml" /> - - <!-- The SPARQL endpoint. 
--> - <arg value="http://localhost:${bsbm.nanoServerPort}/" /> - - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - - <target name="run-query" depends="compile" description="Runs the benchmark queries against the loaded data."> - <java classname="benchmark.testdriver.TestDriver" fork="true" failonerror="true" dir="${bsbmtools.dir}"> - <!-- -runs # is the #of query mix runs (default is 500). --> - <arg value="-runs" /> - <arg value="${bsbm.runs}" /> - - <!-- -w # is the #of warmup query mixes (default is 50). --> - <arg value="-w" /> - <arg value="${bsbm.w}" /> - - <!-- -mt # is the #of concurrent clients. --> - <arg value="-mt" /> - <arg value="${bsbm.mt}" /> - - <!-- -qdir dir is the query directory (default is queries). --> - <!--<arg value="-qdir"/><arg value="src/resources/bsbm_data"/>--> - - <!-- -idir dir is the test data directory (default td_data). --> - <arg value="-idir" /> - <arg value="${bsbm.dataDir}" /> - - <!-- The randomizer seed. --> - <arg value="-seed" /> - <arg value="${bsbm.seed}" /> - - <!-- -o file is the name of the xml output file. --> - <arg value="-o" /> - <arg value="${bsbm.resultsDir}/benchmark_result_pc${bsbm.pc}_runs${bsbm.runs}_mt${bsbm.mt}.xml" /> - - <!-- The SPARQL endpoint. --> - <arg value="http://localhost:${bsbm.nanoServerPort}/" /> - - <classpath> - <path refid="runtime.classpath" /> - </classpath> - </java> - </target> - + </project> Modified: branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/README.txt =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/README.txt 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/README.txt 2011-09-22 15:11:19 UTC (rev 5235) @@ -1,2 +1,11 @@ -This module contains an integration with LUBM (the Lehigh University -Benchmark). \ No newline at end of file +The LUBM benchmark can be downloaded from [1]. 
Directions on its use are +available from the project home page. You can download a modified version of +the LUBM benchmark which can make it a bit easier to use with bigdata from [2]. +Please contact the project maintainers if you have questions about this modified +version of the LUBM benchmark. See [3] and [4] for information about running +LUBM with bigdata. + +[1] http://swat.cse.lehigh.edu/projects/lubm/ +[2] http://www.systap.com/bigdata/bigdata-lubm.tgz +[3] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=LUBM +[4] https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=LUBM_Cluster Deleted: branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/RWStore.properties =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/RWStore.properties 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/RWStore.properties 2011-09-22 15:11:19 UTC (rev 5235) @@ -1,167 +0,0 @@ -# -# Note: These options are applied when the journal and the triple store are -# created. If you want to modify options after than you must do so using -# [ant set-properties] or by overriding appropriate properties on the command -# line. - -## -## Journal options. -## - -# Disk is the worm store. DiskRW is the read/write store. -com.bigdata.journal.AbstractJournal.bufferMode=DiskRW -com.bigdata.btree.writeRetentionQueue.capacity=8000 -com.bigdata.btree.BTree.branchingFactor=128 -com.bigdata.btree.maxRecLen=256 -com.bigdata.rdf.store.AbstractTripleStore.inlineXSDDatatypeLiterals=true -com.bigdata.rdf.store.AbstractTripleStore.inlineTextLiterals=false -com.bigdata.rdf.store.AbstractTripleStore.maxInlineTextLength=0 -com.bigdata.rdf.store.AbstractTripleStore.inlineBNodes=true -com.bigdata.rdf.store.AbstractTripleStore.inlineDateTimes=true - -# RWStore options. -#com.bigdata.rwstore.RWStore.allocationSizes=1, 2, 3, 5, 8, 12, 16, 32, 48, 64, 128 - -# -# Overrides for various indices. 
-# - -# U50 4k pages. -#com.bigdata.namespace.LUBM_U50.lex.com.bigdata.btree.BTree.branchingFactor=1024 -#com.bigdata.namespace.LUBM_U50.spo.com.bigdata.btree.BTree.branchingFactor=512 -#com.bigdata.namespace.LUBM_U50.lex.TERM2ID.com.bigdata.btree.BTree.branchingFactor=270 -#com.bigdata.namespace.LUBM_U50.lex.ID2TERM.com.bigdata.btree.BTree.branchingFactor=120 -com.bigdata.namespace.LUBM_U50.lex.TERMS.com.bigdata.btree.BTree.branchingFactor=512 -com.bigdata.namespace.LUBM_U50.spo.POS.com.bigdata.btree.BTree.branchingFactor=970 -com.bigdata.namespace.LUBM_U50.spo.SPO.com.bigdata.btree.BTree.branchingFactor=512 -com.bigdata.namespace.LUBM_U50.spo.OSP.com.bigdata.btree.BTree.branchingFactor=470 - -# U1000 4k -#com.bigdata.namespace.LUBM_U1000.lex.TERM2ID.com.bigdata.btree.BTree.branchingFactor=270 -#com.bigdata.namespace.LUBM_U1000.lex.ID2TERM.com.bigdata.btree.BTree.branchingFactor=120 -com.bigdata.namespace.LUBM_U1000.lex.TERMS.com.bigdata.btree.BTree.branchingFactor=512 -com.bigdata.namespace.LUBM_U1000.spo.POS.com.bigdata.btree.BTree.branchingFactor=970 -com.bigdata.namespace.LUBM_U1000.spo.SPO.com.bigdata.btree.BTree.branchingFactor=512 -com.bigdata.namespace.LUBM_U1000.spo.OSP.com.bigdata.btree.BTree.branchingFactor=470 - -# U50 8k pages. -#com.bigdata.namespace.LUBM_U50.lex.TERM2ID.com.bigdata.btree.BTree.branchingFactor=540 -#com.bigdata.namespace.LUBM_U50.lex.ID2TERM.com.bigdata.btree.BTree.branchingFactor=240 -#com.bigdata.namespace.LUBM_U50.spo.POS.com.bigdata.btree.BTree.branchingFactor=1940 -#com.bigdata.namespace.LUBM_U50.spo.SPO.com.bigdata.btree.BTree.branchingFactor=1024 -#com.bigdata.namespace.LUBM_U50.spo.OSP.com.bigdata.btree.BTree.branchingFactor=940 - -# Override the #of write cache buffers. -#com.bigdata.journal.AbstractJournal.writeCacheBufferCount=12 - -# 200M initial extent. -com.bigdata.journal.AbstractJournal.initialExtent=209715200 -com.bigdata.journal.AbstractJournal.maximumExtent=209715200 - -## -## Triple store options. 
-## - -com.bigdata.rdf.store.AbstractTripleStore.quads=false -com.bigdata.rdf.store.AbstractTripleStore.statementIdentifiers=false -com.bigdata.rdf.store.AbstractTripleStore.textIndex=false -com.bigdata.rdf.store.AbstractTripleStore.vocabularyClass=com.bigdata.rdf.vocab.LUBMVocabulary - -# Default is 100k. -#com.bigdata.rdf.store.DataLoader.bufferCapacity=100000 - -# 50000 is the default. -#com.bigdata.rdf.store.AbstractTripleStore.termCache.capacity=50000 - -# When "true", the store will perform incremental closure as the data -# are loaded. When "false", the closure will be computed after all data -# are loaded. (Actually, since we are not loading through the SAIL -# making this true does not cause incremental TM but it does disable -# closure, so "false" is what you need here). -com.bigdata.rdf.sail.truthMaintenance=false - -# -# Option to restrict ourselves to RDFS only inference. This condition -# may be compared readily to many other stores. -# -# Note: While we can turn on some kinds of owl processing (e.g., -# TransitiveProperty, see below), we can not compute all the necessary -# entailments (only queries 11 and 13 benefit). -# -# Note: There are no owl:sameAs assertions in LUBM. -# -# Note: lubm query does not benefit from owl:inverseOf. -# -# Note: lubm query does benefit from owl:TransitiveProperty (queries 11 -# and 13). -# -# Note: owl:Restriction (which we can not compute) plus -# owl:TransitiveProperty is required to get all the answers for LUBM. -# -# @todo disable the backchainer for LDS, EDS, JDS. -# -#com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.NoAxioms -com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.RdfsAxioms - -# 10000 is default. -#com.bigdata.rdf.sail.bufferCapacity=10000 - -# Produce a full closure (all entailments) so that the backward chainer -# is always a NOP. 
Note that the configuration properties are stored in -# the database (in the global row store) so you always get exactly the -# same configuration that you created when reopening a triple store. - -# properties.setProperty(Options.FORWARD_CHAIN_RDF_TYPE_RDFS_RESOURCE, "true"); -# properties.setProperty(Options.FORWARD_CHAIN_OWL_SAMEAS_PROPERTIES, "true"); - -# Additional owl inferences. LUBM only both inverseOf and -# TransitiveProperty of those that we support (owl:sameAs, -# owl:inverseOf, owl:TransitiveProperty), but not owl:sameAs. -com.bigdata.rdf.rules.InferenceEngine.forwardChainOwlInverseOf=true -com.bigdata.rdf.rules.InferenceEngine.forwardChainOwlTransitiveProperty=true - -# Note: FastClosure is the default. -#com.bigdata.rdf.store.AbstractTripleStore.closureClass=com.bigdata.rdf.rules.FastClosure -#com.bigdata.rdf.store.AbstractTripleStore.closureClass=com.bigdata.rdf.rules.FullClosure - -# Turn off incremental closure in the DataLoader object. -com.bigdata.rdf.store.DataLoader.closure=None - -# Turn off commit in the DataLoader object. We do not need to commit -# anything until we have loaded all the data and computed the closure -# over the database. -com.bigdata.rdf.store.DataLoader.commit=None - -# Turn off Unicode support for index keys (this is a big win for load -# rates since LUBM does not use Unicode data, but it has very little -# effect on query rates since the only time we generate Unicode sort -# keys is when resolving the Values in the queries to term identifiers -# in the database). -com.bigdata.btree.keys.KeyBuilder.collator=ASCII - -# Turn on bloom filter for the SPO index (good up to ~2M index entries -# for scale-up -or- for any size index for scale-out). -com.bigdata.rdf.store.AbstractTripleStore.bloomFilter=false - -# Turn off justifications (impacts only the load performance, but -# it is a big impact and only required if you will be doing TM). 
-com.bigdata.rdf.store.AbstractTripleStore.justify=false - -# Maximum #of subqueries to evaluate concurrently for the 1st join -# dimension for native rules. Zero disables the use of an executor -# service. One forces a single thread, but runs the subquery on the -# executor service. N>1 is concurrent subquery evaluation. -#com.bigdata.relation.rule.eval.maxParallelSubqueries=5 -com.bigdata.relation.rule.eval.maxParallelSubqueries=0 - -# May be used to turn off query-time expansion of entailments such as -# (x rdf:type rdfs:Resource) and owl:sameAs even through those -# entailments were not materialized during forward closure. (This -# property is interpreted by the BigdataSail). -com.bigdata.rdf.sail.queryTimeExpander=false - -# Note: LUBM uses blank nodes. Therefore re-loading LUBM will always -# cause new statements to be asserted and result in the closure being -# updated if it is recomputed. You can work around this using this -# property. -#com.bigdata.rdf.store.AbstractTripleStore.storeBlankNodes=true Deleted: branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/WORMStore.properties =================================================================== --- branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/WORMStore.properties 2011-09-22 12:45:33 UTC (rev 5234) +++ branches/TERMS_REFACTOR_BRANCH/bigdata-perf/lubm/WORMStore.properties 2011-09-22 15:11:19 UTC (rev 5235) @@ -1,121 +0,0 @@ -# -# Note: These options are applied when the journal and the triple store are -# created. If you want to modify options after than you must do so using -# [ant set-properties] or by overriding appropriate properties on the command -# line. - -## -## Journal options. -## - -# Disk is the worm store. DiskRW is the read/write store. -com.bigdata.journal.AbstractJournal.bufferMode=DiskWORM -com.bigdata.btree.writeRetentionQueue.capacity=8000 - -# 200M initial extent. 
-com.bigdata.journal.AbstractJournal.initialExtent=209715200 -com.bigdata.journal.AbstractJournal.maximumExtent=209715200 - -## -## Triple store options. -## - -com.bigdata.rdf.store.AbstractTripleStore.quads=false -com.bigdata.rdf.store.AbstractTripleStore.statementIdentifiers=false -com.bigdata.rdf.store.AbstractTripleStore.textIndex=false - -# 50000 is the default. -#com.bigdata.rdf.store.AbstractTripleStore.termCache.capacity=50000 - -# When "true", the store will perform incremental closure as the data -# are loaded. When "false", the closure will be computed after all data -# are loaded. (Actually, since we are not loading through the SAIL -# making this true does not cause incremental TM but it does disable -# closure, so "false" is what you need here). -com.bigdata.rdf.sail.truthMaintenance=false - -# -# Option to restrict ourselves to RDFS only inference. This condition -# may be compared readily to many other stores. -# -# Note: While we can turn on some kinds of owl processing (e.g., -# TransitiveProperty, see below), we can not compute all the necessary -# entailments (only queries 11 and 13 benefit). -# -# Note: There are no owl:sameAs assertions in LUBM. -# -# Note: lubm query does not benefit from owl:inverseOf. -# -# Note: lubm query does benefit from owl:TransitiveProperty (queries 11 -# and 13). -# -# Note: owl:Restriction (which we can not compute) plus -# owl:TransitiveProperty is required to get all the answers for LUBM. -# -# @todo disable the backchainer for LDS, EDS, JDS. -# -#com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.NoAxioms -com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.RdfsAxioms - -# 10000 is default. -com.bigdata.rdf.sail.bufferCapacity=100000 - -# Produce a full closure (all entailments) so that the backward chainer -# is always a NOP. 
Note that the configuration properties are stored in -# the database (in the global row store) so you always get exactly the -# same configuration that you created when reopening a triple store. - -# properties.setProperty(Options.FORWARD_CHAIN_RDF_TYPE_RDFS_RESOURCE, "true"); -# properties.setProperty(Options.FORWARD_CHAIN_OWL_SAMEAS_PROPERTIES, "true"); - -# Additional owl inferences. LUBM only both inverseOf and -# TransitiveProperty of those that we support (owl:sameAs, -# owl:inverseOf, owl:TransitiveProperty), but not owl:sameAs. -com.bigdata.rdf.rules.InferenceEngine.forwardChainOwlInverseOf=true -com.bigdata.rdf.rules.InferenceEngine.forwardChainOwlTransitiveProperty=true - -# Note: FastClosure is the default. -#com.bigdata.rdf.store.AbstractTripleStore.closureClass=com.bigdata.rdf.rules.FastClosure -#com.bigdata.rdf.store.AbstractTripleStore.closureClass=com.bigdata.rdf.rules.FullClosure - -# Turn off incremental closure in the DataLoader object. -com.bigdata.rdf.store.DataLoader.closure=None - -# Turn off commit in the DataLoader object. We do not need to commit -# anything until we have loaded all the data and computed the closure -# over the database. -com.bigdata.rdf.store.DataLoader.commit=None - -# Turn off Unicode support for index keys (this is a big win for load -# rates since LUBM does not use Unicode data, but it has very little -# effect on query rates since the only time we generate Unicode sort -# keys is when resolving the Values in the queries to term identifiers -# in the database). -com.bigdata.btree.keys.KeyBuilder.collator=ASCII - -# Turn on bloom filter for the SPO index (good up to ~2M index entries -# for scale-up -or- for any size index for scale-out). -com.bigdata.rdf.store.AbstractTripleStore.bloomFilter=true - -# Turn off justifications (impacts only the load performance, but -# it is a big impact and only required if you will be doing TM). 
-com.bigdata.rdf.store.AbstractTripleStore.justify=false - -# Maximum #of subqueries to evaluate concurrently for the 1st join -# dimension for native rules. Zero disables the use of an executor -# service. One forces a single thread, but runs the subquery on the -# executor service. N>1 is concurrent subquery evaluation. -#com.bigdata.relation.rule.e... [truncated message content] |
From: <tho...@us...> - 2011-09-22 15:15:15
|
Revision: 5236 http://bigdata.svn.sourceforge.net/bigdata/?rev=5236&view=rev Author: thompsonbry Date: 2011-09-22 15:15:09 +0000 (Thu, 22 Sep 2011) Log Message: ----------- Modified .classpath to remove bad reference. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/.classpath branches/TERMS_REFACTOR_BRANCH/.classpath Modified: branches/BIGDATA_RELEASE_1_0_0/.classpath =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/.classpath 2011-09-22 15:11:19 UTC (rev 5235) +++ branches/BIGDATA_RELEASE_1_0_0/.classpath 2011-09-22 15:15:09 UTC (rev 5236) @@ -17,7 +17,6 @@ <classpathentry kind="src" path="dsi-utils/src/test"/> <classpathentry kind="src" path="ctc-striterators/src/java"/> <classpathentry kind="src" path="ctc-striterators/src/test"/> - <classpathentry kind="src" path="bigdata-perf/bsbm/src/test"/> <classpathentry kind="src" path="bigdata-sails/src/prolog"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/dsi-utils-1.0.6-020610.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lgpl-utils-1.0.6-020610.jar"/> Modified: branches/TERMS_REFACTOR_BRANCH/.classpath =================================================================== --- branches/TERMS_REFACTOR_BRANCH/.classpath 2011-09-22 15:11:19 UTC (rev 5235) +++ branches/TERMS_REFACTOR_BRANCH/.classpath 2011-09-22 15:15:09 UTC (rev 5236) @@ -17,7 +17,6 @@ <classpathentry kind="src" path="dsi-utils/src/test"/> <classpathentry kind="src" path="ctc-striterators/src/java"/> <classpathentry kind="src" path="ctc-striterators/src/test"/> - <classpathentry kind="src" path="bigdata-perf/bsbm/src/test"/> <classpathentry kind="src" path="junit-ext/src/test"/> <classpathentry kind="src" path="junit-ext/src/java"/> <classpathentry kind="src" path="lgpl-utils/src/java"/> @@ -69,7 +68,7 @@ <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/slf4j-api-1.6.1.jar"/> <classpathentry exported="true" kind="lib" 
path="bigdata-rdf/lib/slf4j-log4j12-1.6.1.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.5.0-onejar.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.5.0.jar"/> - <classpathentry kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.5.0.jar"/> + <classpathentry kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.5.0.jar" sourcepath="/org.openrdf.sesame-2.5.0"/> + <classpathentry kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.5.0.jar" sourcepath="/org.openrdf.sesame-2.5.0"/> <classpathentry kind="output" path="bin"/> </classpath> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |