From: <mrp...@us...> - 2011-01-12 18:18:00
Revision: 4075
          http://bigdata.svn.sourceforge.net/bigdata/?rev=4075&view=rev
Author:   mrpersonick
Date:     2011-01-12 18:17:51 +0000 (Wed, 12 Jan 2011)

Log Message:
-----------
working on nested optional groups

Modified Paths:
--------------
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestDeepCopy.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/controller/TestOptionalJoinGroup.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsInline.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsLiteral.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestInlineConstraints.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp2BOpUtility.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOpTree.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOpTreeBuilder.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNestedOptionals.java

Added Paths:
-----------
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategy.java

Removed Paths:
-------------
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AbstractInlineConstraint.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineEQ.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGE.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGT.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLE.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLT.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineNE.java

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java	2011-01-12 04:44:24 UTC (rev 4074)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java	2011-01-12 18:17:51 UTC (rev 4075)
@@ -139,8 +139,8 @@

         this.sink2 = context.getSink2();

-        if (sink2 == null)
-            throw new
IllegalArgumentException(); +// if (sink2 == null) +// throw new IllegalArgumentException(); if (sink == sink2) throw new IllegalArgumentException(); @@ -186,7 +186,7 @@ // stats.unitsOut.add(ndef); } - if (nalt > 0) { + if (nalt > 0 && sink2 != null) { if (nalt == alt.length) sink2.add(alt); else @@ -198,14 +198,16 @@ } sink.flush(); - sink2.flush(); + if (sink2 != null) + sink2.flush(); return null; } finally { sink.close(); - sink2.close(); + if (sink2 != null) + sink2.close(); } Deleted: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -1,437 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Aug 18, 2010 - */ - -package com.bigdata.bop.controller; - -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.Executor; -import java.util.concurrent.FutureTask; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.BOpContext; -import com.bigdata.bop.BOpUtility; -import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.NV; -import com.bigdata.bop.PipelineOp; -import com.bigdata.bop.engine.IRunningQuery; -import com.bigdata.bop.engine.LocalChunkMessage; -import com.bigdata.bop.engine.QueryEngine; -import com.bigdata.relation.accesspath.IAsynchronousIterator; -import com.bigdata.relation.accesspath.ThickAsynchronousIterator; -import com.bigdata.util.concurrent.LatchedExecutor; - -/** - * For each binding set presented, this operator executes a subquery. Any - * solutions produced by the subquery are copied to the default sink. If no - * solutions are produced, then the original binding set is copied to the - * default sink (optional join semantics). Each subquery is run as a separate - * query but will be cancelled if the parent query is cancelled. - * - * FIXME Parallel evaluation of subqueries is not implemented. What is the - * appropriate parallelism for this operator? More parallelism should reduce - * latency but could increase the memory burden. Review this decision once we - * have the RWStore operating as a binding set buffer on the Java process heap. 
- * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public class OptionalJoinGroup extends PipelineOp { - - /** - * - */ - private static final long serialVersionUID = 1L; - - public interface Annotations extends PipelineOp.Annotations { - - /** - * The subquery to be evaluated for each binding sets presented to the - * {@link OptionalJoinGroup} (required). This should be a - * {@link PipelineOp}. - */ - String SUBQUERY = OptionalJoinGroup.class.getName() + ".subquery"; - - /** - * When <code>true</code> the subquery has optional semantics (if the - * subquery fails, the original binding set will be passed along to the - * downstream sink anyway). - */ - String OPTIONAL = OptionalJoinGroup.class.getName() + ".optional"; - - boolean DEFAULT_OPTIONAL = true; - - /** - * The maximum parallelism with which the subqueries will be evaluated - * (default {@value #DEFAULT_MAX_PARALLEL}). - */ - String MAX_PARALLEL = OptionalJoinGroup.class.getName() - + ".maxParallel"; - - int DEFAULT_MAX_PARALLEL = 1; - - } - - /** - * @see Annotations#MAX_PARALLEL - */ - public int getMaxParallel() { - return getProperty(Annotations.MAX_PARALLEL, - Annotations.DEFAULT_MAX_PARALLEL); - } - - /** - * Deep copy constructor. - */ - public OptionalJoinGroup(final OptionalJoinGroup op) { - super(op); - } - - /** - * Shallow copy constructor. - * - * @param args - * @param annotations - */ - public OptionalJoinGroup(final BOp[] args, - final Map<String, Object> annotations) { - - super(args, annotations); - -// if (!getEvaluationContext().equals(BOpEvaluationContext.CONTROLLER)) -// throw new IllegalArgumentException(Annotations.EVALUATION_CONTEXT -// + "=" + getEvaluationContext()); - - getRequiredProperty(Annotations.SUBQUERY); - -// if (!getProperty(Annotations.CONTROLLER, Annotations.DEFAULT_CONTROLLER)) -// throw new IllegalArgumentException(Annotations.CONTROLLER); - -// // The id of this operator (if any). -// final Integer thisId = (Integer)getProperty(Annotations.BOP_ID); -// -// for(BOp op : args) { -// -// final Integer sinkId = (Integer) op -// .getRequiredProperty(Annotations.SINK_REF); -// -// if(sinkId.equals(thisId)) -// throw new RuntimeException("Operand may not target ") -// -// } - - } - - public OptionalJoinGroup(final BOp[] args, NV... annotations) { - - this(args, NV.asMap(annotations)); - - } - - public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { - - return new FutureTask<Void>(new ControllerTask(this, context)); - - } - - /** - * Evaluates the arguments of the operator as subqueries. The arguments are - * evaluated in order. An {@link Executor} with limited parallelism to - * evaluate the arguments. If the controller operator is interrupted, then - * the subqueries are cancelled. If a subquery fails, then all subqueries - * are cancelled. 
- */ - private static class ControllerTask implements Callable<Void> { - - private final OptionalJoinGroup controllerOp; - private final BOpContext<IBindingSet> context; -// private final List<FutureTask<IRunningQuery>> tasks = new LinkedList<FutureTask<IRunningQuery>>(); -// private final CountDownLatch latch; - private final boolean optional; - private final int nparallel; - private final PipelineOp subquery; - private final Executor executor; - - public ControllerTask(final OptionalJoinGroup controllerOp, final BOpContext<IBindingSet> context) { - - if (controllerOp == null) - throw new IllegalArgumentException(); - - if (context == null) - throw new IllegalArgumentException(); - - this.controllerOp = controllerOp; - - this.context = context; - - this.optional = controllerOp.getProperty(Annotations.OPTIONAL, - Annotations.DEFAULT_OPTIONAL); - - this.nparallel = controllerOp.getProperty(Annotations.MAX_PARALLEL, - Annotations.DEFAULT_MAX_PARALLEL); - - this.subquery = (PipelineOp) controllerOp - .getRequiredProperty(Annotations.SUBQUERY); - - this.executor = new LatchedExecutor(context.getIndexManager() - .getExecutorService(), nparallel); - -// this.latch = new CountDownLatch(controllerOp.arity()); - -// /* -// * Create FutureTasks for each subquery. The futures are submitted -// * to the Executor yet. That happens in call(). By deferring the -// * evaluation until call() we gain the ability to cancel all -// * subqueries if any subquery fails. -// */ -// for (BOp op : controllerOp.args()) { -// -// /* -// * Task runs subquery and cancels all subqueries in [tasks] if -// * it fails. -// */ -// tasks.add(new FutureTask<IRunningQuery>(new SubqueryTask(op, -// context)) { -// /* -// * Hook future to count down the latch when the task is -// * done. -// */ -// public void run() { -// try { -// super.run(); -// } finally { -// latch.countDown(); -// } -// } -// }); -// -// } - - } - - /** - * Evaluate the subquery. - * - * @todo Support limited parallelism for each binding set read from the - * source. We will need to keep track of the running subqueries in - * order to wait on them before returning from this method and in - * order to cancel them if something goes wrong. - */ - public Void call() throws Exception { - - try { - - final IAsynchronousIterator<IBindingSet[]> sitr = context - .getSource(); - - while(sitr.hasNext()) { - - final IBindingSet[] chunk = sitr.next(); - - for(IBindingSet bset : chunk) { - - FutureTask<IRunningQuery> ft = new FutureTask<IRunningQuery>( - new SubqueryTask(bset, subquery, context)); - - // run the subquery. - executor.execute(ft); - - try { - - // wait for the outcome. - ft.get(); - - } finally { - - /* - * Ensure that the inner task is cancelled if the - * outer task is interrupted. - */ - ft.cancel(true/* mayInterruptIfRunning */); - - } - - } - - } - -// /* -// * Run subqueries with limited parallelism. -// */ -// for (FutureTask<IRunningQuery> ft : tasks) { -// executor.execute(ft); -// } -// -// /* -// * Wait for all subqueries to complete. -// */ -// latch.await(); -// -// /* -// * Get the futures, throwing out any errors. -// */ -// for (FutureTask<IRunningQuery> ft : tasks) -// ft.get(); - - // Now that we know the subqueries ran Ok, flush the sink. - context.getSink().flush(); - - // Done. - return null; - - } finally { - -// // Cancel any tasks which are still running. 
-// for (FutureTask<IRunningQuery> ft : tasks) -// ft.cancel(true/* mayInterruptIfRunning */); - - context.getSource().close(); - - context.getSink().close(); - - if (context.getSink2() != null) - context.getSink2().close(); - - } - - } - - /** - * Run a subquery. - * - * @author <a href="mailto:tho...@us...">Bryan - * Thompson</a> - */ - private class SubqueryTask implements Callable<IRunningQuery> { - - /** - * The evaluation context for the parent query. - */ - private final BOpContext<IBindingSet> parentContext; - - /** - * The source binding set. This will be copied to the output if - * there are no solutions for the subquery (optional join - * semantics). - */ - private final IBindingSet bset; - - /** - * The root operator for the subquery. - */ - private final BOp subQueryOp; - - public SubqueryTask(final IBindingSet bset, final BOp subQuery, - final BOpContext<IBindingSet> parentContext) { - - this.bset = bset; - - this.subQueryOp = subQuery; - - this.parentContext = parentContext; - - } - - public IRunningQuery call() throws Exception { - - IAsynchronousIterator<IBindingSet[]> subquerySolutionItr = null; - try { - - final QueryEngine queryEngine = parentContext.getRunningQuery() - .getQueryEngine(); - -// final IRunningQuery runningQuery = queryEngine -// .eval(subQueryOp); - - final BOp startOp = BOpUtility.getPipelineStart(subQueryOp); - - final int startId = startOp.getId(); - - final UUID queryId = UUID.randomUUID(); - - // execute the subquery, passing in the source binding set. - final IRunningQuery runningQuery = queryEngine - .eval( - queryId, - (PipelineOp) subQueryOp, - new LocalChunkMessage<IBindingSet>( - queryEngine, - queryId, - startId, - -1 /* partitionId */, - new ThickAsynchronousIterator<IBindingSet[]>( - new IBindingSet[][] { new IBindingSet[] { bset } }))); - - // Iterator visiting the subquery solutions. - subquerySolutionItr = runningQuery.iterator(); - - // Copy solutions from the subquery to the query. - final long ncopied = BOpUtility.copy(subquerySolutionItr, - parentContext.getSink(), null/* sink2 */, - null/* constraints */, null/* stats */); - - // wait for the subquery. - runningQuery.get(); - - if (ncopied == 0L && optional) { - - /* - * Since there were no solutions for the subquery, copy - * the original binding set to the default sink. - */ - parentContext.getSink().add(new IBindingSet[]{bset}); - - } - - // done. - return runningQuery; - - } catch (Throwable t) { - - /* - * If a subquery fails, then propagate the error to the - * parent and rethrow the first cause error out of the - * subquery. - */ - throw new RuntimeException(ControllerTask.this.context - .getRunningQuery().halt(t)); - - } finally { - - if (subquerySolutionItr != null) - subquerySolutionItr.close(); - - } - - } - - } // SubqueryTask - - } // ControllerTask - -} Copied: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java (from rev 4072, branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java) =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -0,0 +1,437 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... 
+ +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Aug 18, 2010 + */ + +package com.bigdata.bop.controller; + +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.Executor; +import java.util.concurrent.FutureTask; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpContext; +import com.bigdata.bop.BOpUtility; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.NV; +import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.engine.IRunningQuery; +import com.bigdata.bop.engine.LocalChunkMessage; +import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.relation.accesspath.IAsynchronousIterator; +import com.bigdata.relation.accesspath.ThickAsynchronousIterator; +import com.bigdata.util.concurrent.LatchedExecutor; + +/** + * For each binding set presented, this operator executes a subquery. Any + * solutions produced by the subquery are copied to the default sink. If no + * solutions are produced, then the original binding set is copied to the + * default sink (optional join semantics). Each subquery is run as a separate + * query but will be cancelled if the parent query is cancelled. + * + * FIXME Parallel evaluation of subqueries is not implemented. What is the + * appropriate parallelism for this operator? More parallelism should reduce + * latency but could increase the memory burden. Review this decision once we + * have the RWStore operating as a binding set buffer on the Java process heap. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class SubqueryOp extends PipelineOp { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public interface Annotations extends PipelineOp.Annotations { + + /** + * The subquery to be evaluated for each binding sets presented to the + * {@link SubqueryOp} (required). This should be a + * {@link PipelineOp}. + */ + String SUBQUERY = SubqueryOp.class.getName() + ".subquery"; + + /** + * When <code>true</code> the subquery has optional semantics (if the + * subquery fails, the original binding set will be passed along to the + * downstream sink anyway). + */ + String OPTIONAL = SubqueryOp.class.getName() + ".optional"; + + boolean DEFAULT_OPTIONAL = false; + + /** + * The maximum parallelism with which the subqueries will be evaluated + * (default {@value #DEFAULT_MAX_PARALLEL}). + */ + String MAX_PARALLEL = SubqueryOp.class.getName() + + ".maxParallel"; + + int DEFAULT_MAX_PARALLEL = 1; + + } + + /** + * @see Annotations#MAX_PARALLEL + */ + public int getMaxParallel() { + return getProperty(Annotations.MAX_PARALLEL, + Annotations.DEFAULT_MAX_PARALLEL); + } + + /** + * Deep copy constructor. + */ + public SubqueryOp(final SubqueryOp op) { + super(op); + } + + /** + * Shallow copy constructor. 
+ * + * @param args + * @param annotations + */ + public SubqueryOp(final BOp[] args, + final Map<String, Object> annotations) { + + super(args, annotations); + +// if (!getEvaluationContext().equals(BOpEvaluationContext.CONTROLLER)) +// throw new IllegalArgumentException(Annotations.EVALUATION_CONTEXT +// + "=" + getEvaluationContext()); + + getRequiredProperty(Annotations.SUBQUERY); + +// if (!getProperty(Annotations.CONTROLLER, Annotations.DEFAULT_CONTROLLER)) +// throw new IllegalArgumentException(Annotations.CONTROLLER); + +// // The id of this operator (if any). +// final Integer thisId = (Integer)getProperty(Annotations.BOP_ID); +// +// for(BOp op : args) { +// +// final Integer sinkId = (Integer) op +// .getRequiredProperty(Annotations.SINK_REF); +// +// if(sinkId.equals(thisId)) +// throw new RuntimeException("Operand may not target ") +// +// } + + } + + public SubqueryOp(final BOp[] args, NV... annotations) { + + this(args, NV.asMap(annotations)); + + } + + public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { + + return new FutureTask<Void>(new ControllerTask(this, context)); + + } + + /** + * Evaluates the arguments of the operator as subqueries. The arguments are + * evaluated in order. An {@link Executor} with limited parallelism to + * evaluate the arguments. If the controller operator is interrupted, then + * the subqueries are cancelled. If a subquery fails, then all subqueries + * are cancelled. + */ + private static class ControllerTask implements Callable<Void> { + + private final SubqueryOp controllerOp; + private final BOpContext<IBindingSet> context; +// private final List<FutureTask<IRunningQuery>> tasks = new LinkedList<FutureTask<IRunningQuery>>(); +// private final CountDownLatch latch; + private final boolean optional; + private final int nparallel; + private final PipelineOp subquery; + private final Executor executor; + + public ControllerTask(final SubqueryOp controllerOp, final BOpContext<IBindingSet> context) { + + if (controllerOp == null) + throw new IllegalArgumentException(); + + if (context == null) + throw new IllegalArgumentException(); + + this.controllerOp = controllerOp; + + this.context = context; + + this.optional = controllerOp.getProperty(Annotations.OPTIONAL, + Annotations.DEFAULT_OPTIONAL); + + this.nparallel = controllerOp.getProperty(Annotations.MAX_PARALLEL, + Annotations.DEFAULT_MAX_PARALLEL); + + this.subquery = (PipelineOp) controllerOp + .getRequiredProperty(Annotations.SUBQUERY); + + this.executor = new LatchedExecutor(context.getIndexManager() + .getExecutorService(), nparallel); + +// this.latch = new CountDownLatch(controllerOp.arity()); + +// /* +// * Create FutureTasks for each subquery. The futures are submitted +// * to the Executor yet. That happens in call(). By deferring the +// * evaluation until call() we gain the ability to cancel all +// * subqueries if any subquery fails. +// */ +// for (BOp op : controllerOp.args()) { +// +// /* +// * Task runs subquery and cancels all subqueries in [tasks] if +// * it fails. +// */ +// tasks.add(new FutureTask<IRunningQuery>(new SubqueryTask(op, +// context)) { +// /* +// * Hook future to count down the latch when the task is +// * done. +// */ +// public void run() { +// try { +// super.run(); +// } finally { +// latch.countDown(); +// } +// } +// }); +// +// } + + } + + /** + * Evaluate the subquery. + * + * @todo Support limited parallelism for each binding set read from the + * source. 
We will need to keep track of the running subqueries in + * order to wait on them before returning from this method and in + * order to cancel them if something goes wrong. + */ + public Void call() throws Exception { + + try { + + final IAsynchronousIterator<IBindingSet[]> sitr = context + .getSource(); + + while(sitr.hasNext()) { + + final IBindingSet[] chunk = sitr.next(); + + for(IBindingSet bset : chunk) { + + FutureTask<IRunningQuery> ft = new FutureTask<IRunningQuery>( + new SubqueryTask(bset, subquery, context)); + + // run the subquery. + executor.execute(ft); + + try { + + // wait for the outcome. + ft.get(); + + } finally { + + /* + * Ensure that the inner task is cancelled if the + * outer task is interrupted. + */ + ft.cancel(true/* mayInterruptIfRunning */); + + } + + } + + } + +// /* +// * Run subqueries with limited parallelism. +// */ +// for (FutureTask<IRunningQuery> ft : tasks) { +// executor.execute(ft); +// } +// +// /* +// * Wait for all subqueries to complete. +// */ +// latch.await(); +// +// /* +// * Get the futures, throwing out any errors. +// */ +// for (FutureTask<IRunningQuery> ft : tasks) +// ft.get(); + + // Now that we know the subqueries ran Ok, flush the sink. + context.getSink().flush(); + + // Done. + return null; + + } finally { + +// // Cancel any tasks which are still running. +// for (FutureTask<IRunningQuery> ft : tasks) +// ft.cancel(true/* mayInterruptIfRunning */); + + context.getSource().close(); + + context.getSink().close(); + + if (context.getSink2() != null) + context.getSink2().close(); + + } + + } + + /** + * Run a subquery. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private class SubqueryTask implements Callable<IRunningQuery> { + + /** + * The evaluation context for the parent query. + */ + private final BOpContext<IBindingSet> parentContext; + + /** + * The source binding set. This will be copied to the output if + * there are no solutions for the subquery (optional join + * semantics). + */ + private final IBindingSet bset; + + /** + * The root operator for the subquery. + */ + private final BOp subQueryOp; + + public SubqueryTask(final IBindingSet bset, final BOp subQuery, + final BOpContext<IBindingSet> parentContext) { + + this.bset = bset; + + this.subQueryOp = subQuery; + + this.parentContext = parentContext; + + } + + public IRunningQuery call() throws Exception { + + IAsynchronousIterator<IBindingSet[]> subquerySolutionItr = null; + try { + + final QueryEngine queryEngine = parentContext.getRunningQuery() + .getQueryEngine(); + +// final IRunningQuery runningQuery = queryEngine +// .eval(subQueryOp); + + final BOp startOp = BOpUtility.getPipelineStart(subQueryOp); + + final int startId = startOp.getId(); + + final UUID queryId = UUID.randomUUID(); + + // execute the subquery, passing in the source binding set. + final IRunningQuery runningQuery = queryEngine + .eval( + queryId, + (PipelineOp) subQueryOp, + new LocalChunkMessage<IBindingSet>( + queryEngine, + queryId, + startId, + -1 /* partitionId */, + new ThickAsynchronousIterator<IBindingSet[]>( + new IBindingSet[][] { new IBindingSet[] { bset } }))); + + // Iterator visiting the subquery solutions. + subquerySolutionItr = runningQuery.iterator(); + + // Copy solutions from the subquery to the query. + final long ncopied = BOpUtility.copy(subquerySolutionItr, + parentContext.getSink(), null/* sink2 */, + null/* constraints */, null/* stats */); + + // wait for the subquery. 
+ runningQuery.get(); + + if (ncopied == 0L && optional) { + + /* + * Since there were no solutions for the subquery, copy + * the original binding set to the default sink. + */ + parentContext.getSink().add(new IBindingSet[]{bset}); + + } + + // done. + return runningQuery; + + } catch (Throwable t) { + + /* + * If a subquery fails, then propagate the error to the + * parent and rethrow the first cause error out of the + * subquery. + */ + throw new RuntimeException(ControllerTask.this.context + .getRunningQuery().halt(t)); + + } finally { + + if (subquerySolutionItr != null) + subquerySolutionItr.close(); + + } + + } + + } // SubqueryTask + + } // ControllerTask + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestDeepCopy.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestDeepCopy.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestDeepCopy.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -45,12 +45,10 @@ import com.bigdata.bop.constraint.NE; import com.bigdata.bop.constraint.NEConstant; import com.bigdata.bop.constraint.OR; -import com.bigdata.rdf.internal.constraints.InlineEQ; -import com.bigdata.rdf.internal.constraints.InlineGE; -import com.bigdata.rdf.internal.constraints.InlineGT; -import com.bigdata.rdf.internal.constraints.InlineLE; -import com.bigdata.rdf.internal.constraints.InlineLT; -import com.bigdata.rdf.internal.constraints.InlineNE; +import com.bigdata.rdf.internal.constraints.CompareBOp; +import com.bigdata.rdf.internal.constraints.IsInline; +import com.bigdata.rdf.internal.constraints.IsLiteral; +import com.bigdata.rdf.internal.constraints.MathBOp; import com.bigdata.rdf.rules.RejectAnythingSameAsItself; import com.bigdata.rdf.spo.SPOPredicate; import com.bigdata.rdf.spo.SPOStarJoin; @@ -99,12 +97,10 @@ SPOStarJoin.class,// com.bigdata.rdf.magic.MagicPredicate.class,// // com.bigdata.rdf.internal.constraint - InlineEQ.class,// - InlineGE.class,// - InlineLT.class,// - InlineLE.class,// - InlineGT.class,// - InlineNE.class,// + CompareBOp.class,// + IsInline.class,// + IsLiteral.class,// + MathBOp.class,// // com.bigdata.rdf.inf RejectAnythingSameAsItself.class, Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/controller/TestOptionalJoinGroup.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/controller/TestOptionalJoinGroup.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/controller/TestOptionalJoinGroup.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -219,7 +219,7 @@ /** * Unit test for optional join group. Three joins are used and target a * {@link SliceOp}. The 2nd and 3rd joins are embedded in an - * {@link OptionalJoinGroup}. + * {@link SubqueryOp}. 
* <P> * The optional join group takes the form: * @@ -342,10 +342,10 @@ subQuery = join3Op; } - final PipelineOp joinGroup1Op = new OptionalJoinGroup(new BOp[]{join1Op}, + final PipelineOp joinGroup1Op = new SubqueryOp(new BOp[]{join1Op}, new NV(Predicate.Annotations.BOP_ID, joinGroup1),// // new NV(PipelineOp.Annotations.CONDITIONAL_GROUP, joinGroup1),// - new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery)// + new NV(SubqueryOp.Annotations.SUBQUERY, subQuery)// // , new NV(BOp.Annotations.CONTROLLER,true)// // new NV(BOp.Annotations.EVALUATION_CONTEXT, // BOpEvaluationContext.CONTROLLER)// @@ -607,10 +607,10 @@ subQuery = join3Op; } - final PipelineOp joinGroup1Op = new OptionalJoinGroup(new BOp[]{join1Op}, + final PipelineOp joinGroup1Op = new SubqueryOp(new BOp[]{join1Op}, new NV(Predicate.Annotations.BOP_ID, joinGroup1),// // new NV(PipelineOp.Annotations.CONDITIONAL_GROUP, joinGroup1),// - new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery)// + new NV(SubqueryOp.Annotations.SUBQUERY, subQuery)// // new NV(BOp.Annotations.CONTROLLER,true)// // new NV(BOp.Annotations.EVALUATION_CONTEXT, // BOpEvaluationContext.CONTROLLER)// @@ -717,7 +717,7 @@ /** * Unit test for optional join group with a filter on a variable outside the * optional join group. Three joins are used and target a {@link SliceOp}. - * The 2nd and 3rd joins are in embedded an {@link OptionalJoinGroup}. The + * The 2nd and 3rd joins are in embedded an {@link SubqueryOp}. The * optional join group contains a filter that uses a variable outside the * optional join group. * <P> @@ -868,10 +868,10 @@ subQuery = join3Op; } - final PipelineOp joinGroup1Op = new OptionalJoinGroup(new BOp[]{condOp}, + final PipelineOp joinGroup1Op = new SubqueryOp(new BOp[]{condOp}, new NV(Predicate.Annotations.BOP_ID, joinGroup1),// // new NV(PipelineOp.Annotations.CONDITIONAL_GROUP, joinGroup1),// - new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery)// + new NV(SubqueryOp.Annotations.SUBQUERY, subQuery)// // new NV(BOp.Annotations.CONTROLLER,true)// // new NV(BOp.Annotations.EVALUATION_CONTEXT, // BOpEvaluationContext.CONTROLLER)// Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -37,8 +37,6 @@ import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.keys.KeyBuilder; import com.bigdata.rawstore.Bytes; -import com.bigdata.rdf.internal.constraints.AbstractInlineConstraint; -import com.bigdata.rdf.internal.constraints.InlineGT; import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.model.BigdataLiteral; Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AbstractInlineConstraint.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AbstractInlineConstraint.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AbstractInlineConstraint.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -1,97 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. 
- -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package com.bigdata.rdf.internal.constraints; - -import java.util.Map; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.Constant; -import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.IConstant; -import com.bigdata.bop.IVariable; -import com.bigdata.bop.constraint.BOpConstraint; -import com.bigdata.rdf.internal.IV; -import com.bigdata.rdf.internal.IVUtility; - -/** - * Use inline terms to perform numerical comparison operations. - * - * @see IVUtility#numericalCompare(IV, IV) - */ -public abstract class AbstractInlineConstraint extends BOpConstraint { - - /** - * - */ - private static final long serialVersionUID = 1L; - - /** - * Required shallow copy constructor. - */ - public AbstractInlineConstraint(final BOp[] values, - final Map<String, Object> annotations) { - super(values, annotations); - } - - /** - * Required deep copy constructor. - */ - public AbstractInlineConstraint(final AbstractInlineConstraint op) { - super(op); - } - - protected AbstractInlineConstraint(final IVariable<IV> v, final IV iv) { - - super(new BOp[] { v, new Constant<IV>(iv) }, null/*annotations*/); - - if (v == null) - throw new IllegalArgumentException(); - - if (!IVUtility.canNumericalCompare(iv)) - throw new IllegalArgumentException(); - - } - - public boolean accept(final IBindingSet s) { - - // get binding for "x". - final IConstant<IV> c = s.get((IVariable<IV>) get(0)/* v */); - - if (c == null) - return true; // not yet bound. - - final IV term = c.get(); - - final IV iv = ((IConstant<IV>) get(1)/* iv */).get(); - - final int compare = IVUtility.numericalCompare(term, iv); - - return _accept(compare); - - } - - protected abstract boolean _accept(final int compare); - -} Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -92,17 +92,14 @@ if (left == null || right == null) return true; // not yet bound. 
- if (IVUtility.canNumericalCompare(left) && - IVUtility.canNumericalCompare(right)) { - - return _accept(IVUtility.numericalCompare(left, right)); - - } else { - - return _accept(left.compareTo(right)); - - } + if (!IVUtility.canNumericalCompare(left)) + throw new NotNumericalException("cannot numerical compare: " + left); + if (!IVUtility.canNumericalCompare(right)) + throw new NotNumericalException("cannot numerical compare: " + right); + + return _accept(IVUtility.numericalCompare(left, right)); + } protected boolean _accept(final int compare) { @@ -128,4 +125,29 @@ } + public static class NotNumericalException extends RuntimeException { + + /** + * + */ + private static final long serialVersionUID = -8853739187628588335L; + + public NotNumericalException() { + super(); + } + + public NotNumericalException(String s, Throwable t) { + super(s, t); + } + + public NotNumericalException(String s) { + super(s); + } + + public NotNumericalException(Throwable t) { + super(t); + } + + } + } Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineEQ.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineEQ.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineEQ.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -1,66 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package com.bigdata.rdf.internal.constraints; - -import java.util.Map; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.IVariable; -import com.bigdata.rdf.internal.IV; - -/** - * Use inline numerical comparison techniques to implement the == operator. - */ -public class InlineEQ extends AbstractInlineConstraint { - - private static final long serialVersionUID = -859713006378534024L; - - /** - * Required shallow copy constructor. - */ - public InlineEQ(final BOp[] values, final Map<String, Object> annotations) { - super(values, annotations); - } - - /** - * Required deep copy constructor. 
- */ - public InlineEQ(final InlineEQ op) { - super(op); - } - - public InlineEQ(final IVariable<IV> v, final IV iv) { - - super(v, iv); - - } - - protected boolean _accept(final int compare) { - - return compare == 0; - - } - -} Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGE.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGE.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGE.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -1,66 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package com.bigdata.rdf.internal.constraints; - -import java.util.Map; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.IVariable; -import com.bigdata.rdf.internal.IV; - -/** - * Use inline numerical comparison techniques to implement the >= operator. - */ -public class InlineGE extends AbstractInlineConstraint { - - private static final long serialVersionUID = 5796593193255235408L; - - /** - * Required shallow copy constructor. - */ - public InlineGE(final BOp[] values, final Map<String, Object> annotations) { - super(values, annotations); - } - - /** - * Required deep copy constructor. - */ - public InlineGE(final InlineGE op) { - super(op); - } - - public InlineGE(final IVariable<IV> v, final IV iv) { - - super(v, iv); - - } - - protected boolean _accept(final int compare) { - - return compare >= 0; - - } - -} Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGT.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGT.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineGT.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -1,66 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package com.bigdata.rdf.internal.constraints; - -import java.util.Map; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.IVariable; -import com.bigdata.rdf.internal.IV; - -/** - * Use inline numerical comparison techniques to implement the > operator. - */ -public class InlineGT extends AbstractInlineConstraint { - - private static final long serialVersionUID = 8104692462788944394L; - - /** - * Required shallow copy constructor. - */ - public InlineGT(final BOp[] values, final Map<String, Object> annotations) { - super(values, annotations); - } - - /** - * Required deep copy constructor. - */ - public InlineGT(final InlineGT op) { - super(op); - } - - public InlineGT(final IVariable<IV> v, final IV iv) { - - super(v, iv); - - } - - protected boolean _accept(final int compare) { - - return compare > 0; - - } - -} Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLE.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLE.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLE.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -1,66 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package com.bigdata.rdf.internal.constraints; - -import java.util.Map; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.IVariable; -import com.bigdata.rdf.internal.IV; - -/** - * Use inline numerical comparison techniques to implement the <= operator. - */ -public class InlineLE extends AbstractInlineConstraint { - - private static final long serialVersionUID = 7632756199316546837L; - - /** - * Required shallow copy constructor. - */ - public InlineLE(final BOp[] values, final Map<String, Object> annotations) { - super(values, annotations); - } - - /** - * Required deep copy constructor. 
- */ - public InlineLE(final InlineLE op) { - super(op); - } - - public InlineLE(final IVariable<IV> v, final IV iv) { - - super(v, iv); - - } - - protected boolean _accept(final int compare) { - - return compare <= 0; - - } - -} Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLT.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLT.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineLT.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -1,66 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package com.bigdata.rdf.internal.constraints; - -import java.util.Map; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.IVariable; -import com.bigdata.rdf.internal.IV; - -/** - * Use inline numerical comparison techniques to implement the < operator. - */ -public class InlineLT extends AbstractInlineConstraint { - - private static final long serialVersionUID = 1012994769934551872L; - - /** - * Required shallow copy constructor. - */ - public InlineLT(final BOp[] values, final Map<String, Object> annotations) { - super(values, annotations); - } - - /** - * Required deep copy constructor. - */ - public InlineLT(final InlineLT op) { - super(op); - } - - public InlineLT(final IVariable<IV> v, final IV iv) { - - super(v, iv); - - } - - protected boolean _accept(final int compare) { - - return compare < 0; - - } - -} Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineNE.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineNE.java 2011-01-12 04:44:24 UTC (rev 4074) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineNE.java 2011-01-12 18:17:51 UTC (rev 4075) @@ -1,66 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-package com.bigdata.rdf.internal.constraints;
-
-import java.util.Map;
-
-import com.bigdata.bop.BOp;
-import com.bigdata.bop.IVariable;
-import com.bigdata.rdf.internal.IV;
-
-/**
- * Use inline numerical comparison techniques to implement the != operator.
- */
-public class InlineNE extends AbstractInlineConstraint {
-
-    pr... [truncated message content]
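The Javadoc carried over into the new SubqueryOp above pins down the optional-join contract: each upstream binding set drives one subquery, any subquery solutions go to the default sink, and when the subquery produces nothing the original binding set is passed through instead — but only when OPTIONAL is set (note that r4075 changes DEFAULT_OPTIONAL from true in the deleted OptionalJoinGroup to false in SubqueryOp). A minimal, self-contained Java sketch of that control flow follows, using plain maps and lists in place of bigdata's IBindingSet/QueryEngine machinery; all names below are illustrative, not bigdata API.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

/**
 * Toy model of the optional-join semantics described in SubqueryOp's
 * Javadoc. A "binding set" is just a variable-to-value map here.
 */
public class OptionalSubquerySketch {

    static List<Map<String, Object>> evalSubqueries(
            final List<Map<String, Object>> source,
            final Function<Map<String, Object>, List<Map<String, Object>>> subquery,
            final boolean optional) {

        final List<Map<String, Object>> sink = new ArrayList<>();

        for (Map<String, Object> bset : source) {

            // Run the subquery once for this source binding set.
            final List<Map<String, Object>> solutions = subquery.apply(bset);

            if (!solutions.isEmpty()) {
                // Subquery solutions are copied to the default sink.
                sink.addAll(solutions);
            } else if (optional) {
                // Optional semantics: no solutions, so the original
                // binding set is passed along downstream anyway.
                sink.add(bset);
            }
            // Non-optional semantics: a failing binding set is dropped.
        }
        return sink;
    }

    public static void main(String[] args) {

        final List<Map<String, Object>> source = new ArrayList<>();
        source.add(new HashMap<>(Map.of("x", 1)));
        source.add(new HashMap<>(Map.of("x", 2)));

        // A subquery that only succeeds when x == 1, binding y as well.
        final Function<Map<String, Object>, List<Map<String, Object>>> subquery = bset -> {
            if (Integer.valueOf(1).equals(bset.get("x"))) {
                final Map<String, Object> solution = new HashMap<>(bset);
                solution.put("y", 42);
                return List.of(solution);
            }
            return List.of();
        };

        // optional=true keeps the failing input {x=2}; optional=false drops it,
        // which is the distinction the new DEFAULT_OPTIONAL = false makes explicit.
        System.out.println(evalSubqueries(source, subquery, true));
        System.out.println(evalSubqueries(source, subquery, false));
    }
}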
From: <mrp...@us...> - 2011-01-11 04:44:31
Revision: 4073
          http://bigdata.svn.sourceforge.net/bigdata/?rev=4073&view=rev
Author:   mrpersonick
Date:     2011-01-11 04:44:22 +0000 (Tue, 11 Jan 2011)

Log Message:
-----------
working on nested optional groups

Modified Paths:
--------------
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IVariableOrConstant.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNestedOptionals.java

Added Paths:
-----------
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IValueExpression.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/AND.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/BOUND.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/NOT.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp2BOpUtility.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOpTree.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOpTreeBuilder.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/UnsupportedOperatorException.java

Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IValueExpression.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IValueExpression.java	                        (rev 0)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IValueExpression.java	2011-01-11 04:44:22 UTC (rev 4073)
@@ -0,0 +1,25 @@
+package com.bigdata.bop;
+
+import java.io.Serializable;
+
+public interface IValueExpression<E> extends BOp, Serializable {
+
+    /**
+     * Return the <i>as bound</i> value of the variable or constant. The <i>as
+     * bound</i> value of an {@link IConstant} is the contant's value. The <i>as
+     * bound</i> value of an {@link IVariable} is the bound value in the given
+     * {@link IBindingSet} -or- <code>null</code> if the variable is not bound
+     * in the {@link IBindingSet}.
+     *
+     * @param bindingSet
+     *            The binding set.
+     *
+     * @return The as bound value of the constant or variable.
+     *
+     * @throws IllegalArgumentException
+     *             if this is an {@link IVariable} and the <i>bindingSet</i> is
+     *             <code>null</code>.
+ */ + E get(IBindingSet bindingSet); + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IVariableOrConstant.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IVariableOrConstant.java 2011-01-11 01:37:17 UTC (rev 4072) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IVariableOrConstant.java 2011-01-11 04:44:22 UTC (rev 4073) @@ -37,7 +37,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -public interface IVariableOrConstant<E> extends BOp, Serializable { +public interface IVariableOrConstant<E> extends IValueExpression<E> { /** * Return <code>true</code> iff this is a variable. @@ -64,24 +64,6 @@ E get(); /** - * Return the <i>as bound</i> value of the variable or constant. The <i>as - * bound</i> value of an {@link IConstant} is the contant's value. The <i>as - * bound</i> value of an {@link IVariable} is the bound value in the given - * {@link IBindingSet} -or- <code>null</code> if the variable is not bound - * in the {@link IBindingSet}. - * - * @param bindingSet - * The binding set. - * - * @return The as bound value of the constant or variable. - * - * @throws IllegalArgumentException - * if this is an {@link IVariable} and the <i>bindingSet</i> is - * <code>null</code>. - */ - E get(IBindingSet bindingSet); - - /** * Return the name of a variable. * * @throws UnsupportedOperationException Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/AND.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/AND.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/AND.java 2011-01-11 04:44:22 UTC (rev 4073) @@ -0,0 +1,74 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +package com.bigdata.bop.constraint; + +import java.util.Map; + +import com.bigdata.bop.BOpBase; +import com.bigdata.bop.BOp; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstraint; + +/** + * Imposes the constraint <code>x AND y</code>. + */ +public class AND extends BOpConstraint { + + /** + * + */ + private static final long serialVersionUID = -8146965892831895463L; + + /** + * Required deep copy constructor. + */ + public AND(final BOp[] args, final Map<String, Object> annotations) { + super(args, annotations); + } + + /** + * Required deep copy constructor. 
+ */ + public AND(final AND op) { + super(op); + } + + public AND(final IConstraint x, final IConstraint y) { + + super(new BOp[] { x, y }, null/*annocations*/); + + if (x == null || y == null) + throw new IllegalArgumentException(); + + } + + public boolean accept(final IBindingSet s) { + + return ((IConstraint) get(0)).accept(s) + && ((IConstraint) get(1)).accept(s); + + } + +} Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/BOUND.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/BOUND.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/BOUND.java 2011-01-11 04:44:22 UTC (rev 4073) @@ -0,0 +1,73 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +package com.bigdata.bop.constraint; + +import java.util.Map; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstraint; +import com.bigdata.bop.IVariable; + +/** + * Imposes the constraint <code>bound(x)</code> for the variable x. + */ +public class BOUND extends BOpConstraint { + + /** + * + */ + private static final long serialVersionUID = -7408654639183330874L; + + /** + * Required deep copy constructor. + */ + public BOUND(final BOp[] args, final Map<String, Object> annotations) { + super(args, annotations); + } + + /** + * Required deep copy constructor. + */ + public BOUND(final BOUND op) { + super(op); + } + + public BOUND(final IVariable x) { + + super(new BOp[] { x }, null/*annocations*/); + + if (x == null) + throw new IllegalArgumentException(); + + } + + public boolean accept(final IBindingSet s) { + + return ((IVariable) get(0)).get(s) != null; + + } + +} Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/NOT.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/NOT.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/NOT.java 2011-01-11 04:44:22 UTC (rev 4073) @@ -0,0 +1,73 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +package com.bigdata.bop.constraint; + +import java.util.Map; + +import com.bigdata.bop.BOpBase; +import com.bigdata.bop.BOp; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstraint; + +/** + * Imposes the constraint <code>!x</code>. + */ +public class NOT extends BOpConstraint { + + /** + * + */ + private static final long serialVersionUID = -5701967329003122236L; + + /** + * Required deep copy constructor. + */ + public NOT(final BOp[] args, final Map<String, Object> annotations) { + super(args, annotations); + } + + /** + * Required deep copy constructor. + */ + public NOT(final NOT op) { + super(op); + } + + public NOT(final IConstraint x) { + + super(new BOp[] { x }, null/*annocations*/); + + if (x == null) + throw new IllegalArgumentException(); + + } + + public boolean accept(final IBindingSet s) { + + return !((IConstraint) get(0)).accept(s); + + } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2011-01-11 01:37:17 UTC (rev 4072) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2011-01-11 04:44:22 UTC (rev 4073) @@ -30,6 +30,7 @@ import com.bigdata.bop.BOp; import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IValueExpression; import com.bigdata.bop.IVariableOrConstant; import com.bigdata.bop.NV; import com.bigdata.bop.PipelineOp; @@ -73,8 +74,8 @@ super(op); } - public CompareBOp(final IVariableOrConstant<IV> left, - final IVariableOrConstant<IV> right, final CompareOp op) { + public CompareBOp(final IValueExpression<IV> left, + final IValueExpression<IV> right, final CompareOp op) { super(new BOp[] { left, right }, NV.asMap(new NV(Annotations.OP, op))); @@ -85,8 +86,8 @@ public boolean accept(final IBindingSet s) { - final IV left = ((IVariableOrConstant<IV>) get(0)).get(s); - final IV right = ((IVariableOrConstant<IV>) get(1)).get(s); + final IV left = ((IValueExpression<IV>) get(0)).get(s); + final IV right = ((IValueExpression<IV>) get(1)).get(s); if (left == null || right == null) return true; // not yet bound. Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java 2011-01-11 01:37:17 UTC (rev 4072) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java 2011-01-11 04:44:22 UTC (rev 4073) @@ -27,18 +27,18 @@ import com.bigdata.bop.BOp; import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IValueExpression; import com.bigdata.bop.IVariable; -import com.bigdata.bop.IVariableOrConstant; import com.bigdata.bop.ImmutableBOp; import com.bigdata.bop.NV; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.internal.IVUtility; /** - * A math expression involving a left and right IVariableOrConstant operand. + * A math expression involving a left and right IValueExpression operand. 
*/ final public class MathBOp extends ImmutableBOp - implements IVariableOrConstant<IV> { + implements IValueExpression<IV> { /** * @@ -55,19 +55,6 @@ } - final public boolean isVar() { - - return ((IVariableOrConstant) get(0)).isVar() || - ((IVariableOrConstant) get(1)).isVar(); - - } - - final public boolean isConstant() { - - return !isVar(); - - } - /** * Required deep copy constructor. * @@ -79,8 +66,8 @@ } - public MathBOp(final IVariableOrConstant<IV> left, - final IVariableOrConstant<IV> right, final MathOp op) { + public MathBOp(final IValueExpression<IV> left, + final IValueExpression<IV> right, final MathOp op) { super(new BOp[] { left, right }, NV.asMap(new NV(Annotations.OP, op))); @@ -98,12 +85,12 @@ // // } - public IVariableOrConstant<IV> left() { - return (IVariableOrConstant<IV>) get(0); + public IValueExpression<IV> left() { + return (IValueExpression<IV>) get(0); } - public IVariableOrConstant<IV> right() { - return (IVariableOrConstant<IV>) get(1); + public IValueExpression<IV> right() { + return (IValueExpression<IV>) get(1); } public MathOp op() { @@ -133,13 +120,11 @@ } - final public boolean equals(final IVariableOrConstant<IV> o) { + final public boolean equals(final IValueExpression<IV> o) { if(!(o instanceof MathBOp)) { - // incomparable types. return false; - } return equals((MathBOp) o); @@ -176,18 +161,6 @@ } - final public IV get() { - - final IV left = left().get(); - final IV right = right().get(); - - if (left == null || right == null) - return null; - - return IVUtility.numericalMath(left, right, op()); - - } - final public IV get(final IBindingSet bindingSet) { final IV left = left().get(bindingSet); @@ -200,10 +173,4 @@ } - final public String getName() { - - throw new UnsupportedOperationException(); - - } - } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java 2011-01-11 01:37:17 UTC (rev 4072) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java 2011-01-11 04:44:22 UTC (rev 4073) @@ -1607,10 +1607,10 @@ if (iv == null) return null; iv1 = new Constant<IV>(iv); - } else if (left instanceof MathExpr) { - iv1 = generateMath((MathExpr) left); - if (iv1 == null) - return null; +// } else if (left instanceof MathExpr) { +// iv1 = generateMath((MathExpr) left); +// if (iv1 == null) +// return null; } else { return null; } @@ -1622,10 +1622,10 @@ if (iv == null) return null; iv2 = new Constant<IV>(iv); - } else if (right instanceof MathExpr) { - iv2 = generateMath((MathExpr) right); - if (iv2 == null) - return null; +// } else if (right instanceof MathExpr) { +// iv2 = generateMath((MathExpr) right); +// if (iv2 == null) +// return null; } else { return null; } @@ -1648,10 +1648,10 @@ if (iv == null) return null; iv1 = new Constant<IV>(iv); - } else if (left instanceof MathExpr) { - iv1 = generateMath((MathExpr) left); - if (iv1 == null) - return null; +// } else if (left instanceof MathExpr) { +// iv1 = generateMath((MathExpr) left); +// if (iv1 == null) +// return null; } else { return null; } @@ -1663,10 +1663,10 @@ if (iv == null) return null; iv2 = new Constant<IV>(iv); - } else if (right instanceof MathExpr) { - iv2 = generateMath((MathExpr) right); - if (iv2 == null) - return null; +// } else if (right instanceof MathExpr) { +// iv2 = 
generateMath((MathExpr) right); +// if (iv2 == null) +// return null; } else { return null; } Added: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java 2011-01-11 04:44:22 UTC (rev 4073) @@ -0,0 +1,2176 @@ +package com.bigdata.rdf.sail; + +import info.aduna.iteration.CloseableIteration; +import info.aduna.iteration.EmptyIteration; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.log4j.Logger; +import org.openrdf.model.Literal; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.query.BindingSet; +import org.openrdf.query.Dataset; +import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.algebra.And; +import org.openrdf.query.algebra.Bound; +import org.openrdf.query.algebra.Compare; +import org.openrdf.query.algebra.Compare.CompareOp; +import org.openrdf.query.algebra.Filter; +import org.openrdf.query.algebra.Group; +import org.openrdf.query.algebra.Join; +import org.openrdf.query.algebra.LeftJoin; +import org.openrdf.query.algebra.MathExpr; +import org.openrdf.query.algebra.MathExpr.MathOp; +import org.openrdf.query.algebra.MultiProjection; +import org.openrdf.query.algebra.Not; +import org.openrdf.query.algebra.Or; +import org.openrdf.query.algebra.Order; +import org.openrdf.query.algebra.Projection; +import org.openrdf.query.algebra.ProjectionElem; +import org.openrdf.query.algebra.ProjectionElemList; +import org.openrdf.query.algebra.QueryModelNode; +import org.openrdf.query.algebra.QueryRoot; +import org.openrdf.query.algebra.SameTerm; +import org.openrdf.query.algebra.StatementPattern; +import org.openrdf.query.algebra.StatementPattern.Scope; +import org.openrdf.query.algebra.TupleExpr; +import org.openrdf.query.algebra.UnaryTupleOperator; +import org.openrdf.query.algebra.Union; +import org.openrdf.query.algebra.ValueConstant; +import org.openrdf.query.algebra.ValueExpr; +import org.openrdf.query.algebra.Var; +import org.openrdf.query.algebra.evaluation.impl.EvaluationStrategyImpl; +import org.openrdf.query.algebra.evaluation.iterator.FilterIterator; +import org.openrdf.query.algebra.helpers.QueryModelVisitorBase; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.Constant; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstant; +import com.bigdata.bop.IConstraint; +import com.bigdata.bop.IPredicate; +import com.bigdata.bop.IPredicate.Annotations; +import com.bigdata.bop.IValueExpression; +import com.bigdata.bop.IVariable; +import com.bigdata.bop.IVariableOrConstant; +import com.bigdata.bop.NV; +import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.ap.Predicate; +import com.bigdata.bop.constraint.AND; +import com.bigdata.bop.constraint.BOUND; +import com.bigdata.bop.constraint.EQ; +import com.bigdata.bop.constraint.INBinarySearch; +import com.bigdata.bop.constraint.NE; +import com.bigdata.bop.constraint.NOT; +import com.bigdata.bop.constraint.OR; +import 
com.bigdata.bop.engine.IRunningQuery; +import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.bop.solutions.ISortOrder; +import com.bigdata.btree.IRangeQuery; +import com.bigdata.btree.keys.IKeyBuilderFactory; +import com.bigdata.rdf.internal.DummyIV; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.internal.constraints.CompareBOp; +import com.bigdata.rdf.internal.constraints.MathBOp; +import com.bigdata.rdf.lexicon.LexiconRelation; +import com.bigdata.rdf.model.BigdataValue; +import com.bigdata.rdf.sail.BigdataSail.Options; +import com.bigdata.rdf.sail.sop.SOp; +import com.bigdata.rdf.sail.sop.SOp2BOpUtility; +import com.bigdata.rdf.sail.sop.SOpTree; +import com.bigdata.rdf.sail.sop.SOpTree.SOpGroup; +import com.bigdata.rdf.sail.sop.SOpTreeBuilder; +import com.bigdata.rdf.sail.sop.UnsupportedOperatorException; +import com.bigdata.rdf.spo.DefaultGraphSolutionExpander; +import com.bigdata.rdf.spo.ExplicitSPOFilter; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.NamedGraphSolutionExpander; +import com.bigdata.rdf.spo.SPOPredicate; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BD; +import com.bigdata.rdf.store.BigdataBindingSetResolverator; +import com.bigdata.relation.accesspath.ElementFilter; +import com.bigdata.relation.accesspath.IAsynchronousIterator; +import com.bigdata.relation.accesspath.IBuffer; +import com.bigdata.relation.accesspath.IElementFilter; +import com.bigdata.relation.rule.IAccessPathExpander; +import com.bigdata.relation.rule.IProgram; +import com.bigdata.relation.rule.IQueryOptions; +import com.bigdata.relation.rule.IRule; +import com.bigdata.relation.rule.Rule; +import com.bigdata.relation.rule.eval.ISolution; +import com.bigdata.relation.rule.eval.RuleStats; +import com.bigdata.search.FullTextIndex; +import com.bigdata.search.IHit; +import com.bigdata.striterator.ChunkedWrappedIterator; +import com.bigdata.striterator.Dechunkerator; +import com.bigdata.striterator.DistinctFilter; +import com.bigdata.striterator.IChunkedOrderedIterator; + +/** + * Extended to rewrite Sesame {@link TupleExpr}s onto native {@link Rule}s and + * to evaluate magic predicates for full text search, etc. Query evaluation can + * proceed either by Sesame 2 evaluation or, if {@link Options#NATIVE_JOINS} is + * enabled, then by translation of Sesame 2 query expressions into native + * {@link IRule}s and native evaluation of those {@link IRule}s. + * + * <h2>Query options</h2> + * The following summarizes how various high-level query language feature are + * mapped onto native {@link IRule}s. + * <dl> + * <dt>DISTINCT</dt> + * <dd>{@link IQueryOptions#isDistinct()}, which is realized using + * {@link DistinctFilter}.</dd> + * <dt>ORDER BY</dt> + * <dd>{@link IQueryOptions#getOrderBy()} is effected by a custom + * {@link IKeyBuilderFactory} which generates sort keys that capture the desired + * sort order from the bindings in an {@link ISolution}. Unless DISTINCT is + * also specified, the generated sort keys are made unique by appending a one up + * long integer to the key - this prevents sort keys that otherwise compare as + * equals from dropping solutions. Note that the SORT is actually imposed by the + * {@link DistinctFilter} using an {@link IKeyBuilderFactory} assembled from the + * ORDER BY constraints. + * + * FIXME BryanT - implement the {@link IKeyBuilderFactory}. 
+ * + * FIXME MikeP - assemble the {@link ISortOrder}[] from the query and set on + * the {@link IQueryOptions}.</dd> + * <dt>OFFSET and LIMIT</dt> + * <dd> + * <p> + * {@link IQueryOptions#getSlice()}, which was effected as a conditional in + * the old "Nested Subquery With Join Threads Task" based on the + * {@link RuleStats#solutionCount}. Query {@link ISolution}s are counted as + * they are generated, but they are only entered into the {@link ISolution} + * {@link IBuffer} when the solutionCount is GE the OFFSET and LT the LIMIT. + * Query evaluation halts once the LIMIT is reached. + * </p> + * <p> + * Note that when DISTINCT and either LIMIT and/or OFFSET are specified + * together, then the LIMIT and OFFSET <strong>MUST</strong> be applied after + * the solutions have been generated since we may have to generate more than + * LIMIT solutions in order to have LIMIT <em>DISTINCT</em> solutions. We + * handle this for now by NOT translating the LIMIT and OFFSET onto the + * {@link IRule} and instead let Sesame close the iterator once it has enough + * solutions. + * </p> + * <p> + * Note that LIMIT and SLICE requires an evaluation plan that provides stable + * results. For a simple query this is achieved by setting + * {@link IQueryOptions#isStable()} to <code>true</code>. + * <p> + * For a UNION query, you must also set {@link IProgram#isParallel()} to + * <code>false</code> to prevent parallelized execution of the {@link IRule}s + * in the {@link IProgram}. + * </p> + * </dd> + * <dt>UNION</dt> + * <dd>A UNION is translated into an {@link IProgram} consisting of one + * {@link IRule} for each clause in the UNION. + * + * FIXME MikeP - implement.</dd> + * </dl> + * <h2>Filters</h2> + * The following provides a summary of how various kinds of FILTER are handled. + * A filter that is not explicitly handled is left untranslated and will be + * applied by Sesame against the generated {@link ISolution}s. + * <p> + * Whenever possible, a FILTER is translated into an {@link IConstraint} on an + * {@link IPredicate} in the generated native {@link IRule}. Some filters are + * essentially JOINs against the {@link LexiconRelation}. Those can be handled + * either as JOINs (generating an additional {@link IPredicate} in the + * {@link IRule}) or as an {@link INBinarySearch} constraint, where the inclusion set is + * pre-populated by some operation on the {@link LexiconRelation}. + * <dl> + * <dt>EQ</dt> + * <dd>Translated into an {@link EQ} constraint on an {@link IPredicate}.</dd> + * <dt>NE</dt> + * <dd>Translated into an {@link NE} constraint on an {@link IPredicate}.</dd> + * <dt>IN</dt> + * <dd>Translated into an {@link INBinarySearch} constraint on an {@link IPredicate}.</dd> + * <dt>OR</dt> + * <dd>Translated into an {@link OR} constraint on an {@link IPredicate}.</dd> + * <dt></dt> + * <dd></dd> + * </dl> + * <h2>Magic predicates</h2> + * <p> + * {@link BD#SEARCH} is the only magic predicate at this time. When the object + * position is bound to a constant, the magic predicate is evaluated once and + * the result is used to generate a set of term identifiers that are matches for + * the token(s) extracted from the {@link Literal} in the object position. Those + * term identifiers are then used to populate an {@link INBinarySearch} constraint. The + * object position in the {@link BD#SEARCH} MUST be bound to a constant. + * </p> + * + * FIXME We are not in fact rewriting the query operation at all, simply + * choosing a different evaluation path as we go. 
The rewrite should really be + * isolated from the execution, e.g., in its own class. That more correct + * approach is more than I want to get into right now as we will have to define + * variants on the various operators that let us model the native rule system + * directly, e.g., an n-ary IProgram, n-ary IRule operator, an IPredicate + * operator, etc. Then we can handle evaluation using their model with anything + * re-written to our custom operators being caught by our custom evaluate() + * methods and everything else running their default methods. Definitely the + * right approach, and much easier to write unit tests. + * + * @todo REGEX : if there is a "ˆ" literal followed by a wildcard + * AND there are no flags which would cause problems (case-folding, etc) + * then the REGEX can be rewritten as a prefix scan on the lexicon, which + * is very efficient, and converted to an IN filter. When the set size is + * huge we should rewrite it as another tail in the query instead. + * <p> + * Otherwise, regex filters are left outside of the rule. We can't + * optimize that until we generate rules that perform JOINs across the + * lexicon and the spo relations (which we could do, in which case it + * becomes a constraint on that join). + * <p> + * We don't have any indices that are designed to optimize regex scans, + * but we could process a regex scan as a parallel iterator scan against + * the lexicon. + * + * @todo Roll more kinds of filters into the native {@link IRule}s as + * {@link IConstraint}s on {@link IPredicate}s. + * <p> + * isURI(), etc. can be evaluated by testing a bit flag on the term + * identifier, which is very efficient. + * <p> + * + * @todo Verify handling of datatype operations. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: BigdataEvaluationStrategyImpl.java 2272 2009-11-04 02:10:19Z + * mrpersonick $ + */ +public class BigdataEvaluationStrategyImpl3 extends EvaluationStrategyImpl { + + /** + * Logger. + */ + protected static final Logger log = + Logger.getLogger(BigdataEvaluationStrategyImpl3.class); + + protected final BigdataTripleSource tripleSource; + + protected final Dataset dataset; + + private final AbstractTripleStore database; + + /** + */ + public BigdataEvaluationStrategyImpl3( + final BigdataTripleSource tripleSource, final Dataset dataset, + final boolean nativeJoins) { + + super(tripleSource, dataset); + + this.tripleSource = tripleSource; + this.dataset = dataset; + this.database = tripleSource.getDatabase(); + this.nativeJoins = nativeJoins; + + } + + /** + * If true, use native evaluation on the sesame operator tree if possible. + */ + private boolean nativeJoins; + + /** + * A set of properties that act as query hints during evaluation. + */ + private Properties queryHints; + + /** + * This is the top-level method called by the SAIL to evaluate a query. + * The TupleExpr parameter here is guaranteed to be the root of the operator + * tree for the query. Query hints are parsed by the SAIL from the + * namespaces in the original query. See {@link QueryHints#NAMESPACE}. + * <p> + * The query root will be handled by the native Sesame evaluation until we + * reach one of three possible top-level operators (union, join, or left + * join) at which point we will take over and translate the sesame operator + * tree into a native bigdata query. 
If in the process of this translation + * we encounter an operator that we cannot handle natively, we will log + * a warning message and punt to Sesame to let it handle the entire + * query evaluation process (much slower than native evaluation). + */ + public CloseableIteration<BindingSet, QueryEvaluationException> evaluate( + final TupleExpr expr, final BindingSet bindings, + final Properties queryHints) + throws QueryEvaluationException { + + // spit out the whole operator tree + if (log.isInfoEnabled()) { + log.info("operator tree:\n" + expr); + } + + this.queryHints = queryHints; + + if (log.isInfoEnabled()) { + log.info("queryHints:\n" + queryHints); + } + + return super.evaluate(expr, bindings); + + } + + /** + * Translate top-level UNIONs into native bigdata programs for execution. + * This will attempt to look down the operator tree from this point and turn + * the Sesame operators into a set of native rules within a single program. + * <p> + * FIXME A Union is a BinaryTupleOperator composed of two expressions. This + * native evaluation only handles the special case where the left and right + * args are one of: {Join, LeftJoin, StatementPattern, Union}. It's + * possible that the left or right arg is something other than one of those + * operators, in which case we punt to the Sesame evaluation, which + * degrades performance. + * <p> + * FIXME Also, even if the left or right arg is one of the cases we handle, + * it's possible that the translation of that arg into a native rule will + * fail because of an unsupported SPARQL language feature, such as an + * embedded UNION or an unsupported filter type. + */ + @Override + public CloseableIteration<BindingSet, QueryEvaluationException> evaluate( + final Union union, final BindingSet bs) + throws QueryEvaluationException { + + if (!nativeJoins) { + // Use Sesame 2 evaluation + return super.evaluate(union, bs); + } + + if (log.isInfoEnabled()) { + log.info("evaluating top-level Union operator"); + } + + try { + + return evaluateNatively(union, bs); + + } catch (UnsupportedOperatorException ex) { + + // Use Sesame 2 evaluation + + log.warn("could not evaluate natively, using Sesame evaluation"); + + if (log.isInfoEnabled()) { + log.info(ex.getOperator()); + } + + nativeJoins = false; + + return super.evaluate(union, bs); + + } + + } + + /** + * Translate top-level JOINs into native bigdata programs for execution. + * This will attempt to look down the operator tree from this point and turn + * the Sesame operators into a native rule. + * <p> + * FIXME It's possible that the translation of the left or right arg into a + * native rule will fail because of an unsupported SPARQL language feature, + * such as an embedded UNION or an unsupported filter type. + */ + @Override + public CloseableIteration<BindingSet, QueryEvaluationException> evaluate( + final Join join, final BindingSet bs) + throws QueryEvaluationException { + + if (!nativeJoins) { + // Use Sesame 2 evaluation + return super.evaluate(join, bs); + } + + if (log.isInfoEnabled()) { + log.info("evaluating top-level Join operator"); + } + + try { + + return evaluateNatively(join, bs); + + } catch (UnsupportedOperatorException ex) { + + // Use Sesame 2 evaluation + + log.warn("could not evaluate natively, using Sesame evaluation"); + + if (log.isInfoEnabled()) { + log.info(ex.getOperator()); + } + + nativeJoins = false; + + return super.evaluate(join, bs); + + } + + } + + /** + * Translate top-level LEFTJOINs into native bigdata programs for execution. 
+ * This will attempt to look down the operator tree from this point and turn + * the Sesame operators into a native rule. + * <p> + * FIXME It's possible that the translation of the left or right arg into a + * native rule will fail because of an unsupported SPARQL language feature, + * such as an embedded UNION or an unsupported filter type. + */ + @Override + public CloseableIteration<BindingSet, QueryEvaluationException> evaluate( + final LeftJoin leftJoin, final BindingSet bs) + throws QueryEvaluationException { + + if (!nativeJoins) { + // Use Sesame 2 evaluation + return super.evaluate(leftJoin, bs); + } + + if (log.isInfoEnabled()) { + log.info("evaluating top-level LeftJoin operator"); + } + + try { + + return evaluateNatively(leftJoin, bs); + + } catch (UnsupportedOperatorException ex) { + + // Use Sesame 2 evaluation + + log.warn("could not evaluate natively, using Sesame evaluation"); + + if (log.isInfoEnabled()) { + log.info(ex.getOperator()); + } + + nativeJoins = false; + + return super.evaluate(leftJoin, bs); + + } + + } + + private CloseableIteration<BindingSet, QueryEvaluationException> + evaluateNatively(final TupleExpr tupleExpr, final BindingSet bs) + throws QueryEvaluationException, UnsupportedOperatorException { + try { + return _evaluateNatively(tupleExpr, bs); + } catch (UnrecognizedValueException ex) { + return new EmptyIteration<BindingSet, QueryEvaluationException>(); + } catch (QueryEvaluationException ex) { + throw ex; + } catch (Exception ex) { + throw new QueryEvaluationException(ex); + } + } + + private CloseableIteration<BindingSet, QueryEvaluationException> + _evaluateNatively(final TupleExpr root, final BindingSet bs) + throws UnsupportedOperatorException, UnrecognizedValueException, + QueryEvaluationException { + + final SOpTreeBuilder stb = new SOpTreeBuilder(); + + /* + * The sesame operator tree + */ + SOpTree sopTree; + + /* + * Turn the Sesame operator tree into something a little easier + * to work with. + */ + sopTree = stb.collectSOps(root); + + /* + * We need to prune groups that contain terms that do not appear in + * our lexicon. + */ + final Collection<SOpGroup> groupsToPrune = new LinkedList<SOpGroup>(); + + /* + * Iterate through the sop tree and translate statement patterns into + * predicates. + */ + for (SOp sop : sopTree) { + final QueryModelNode op = sop.getOperator(); + if (op instanceof StatementPattern) { + final StatementPattern sp = (StatementPattern) op; + try { + final IPredicate bop = toPredicate((StatementPattern) op); + sop.setBOp(bop); + } catch (UnrecognizedValueException ex) { + /* + * If we encounter a value not in the lexicon, we can + * still continue with the query if the value is in + * either an optional tail or an optional join group (i.e. + * if it appears on the right side of a LeftJoin). + * Otherwise we can stop evaluating right now. + */ + if (sop.isRightSideLeftJoin()) { + groupsToPrune.add(sopTree.getGroup(sop.getGroup())); + } else { + throw ex; + } + } + } + } + + /* + * Prunes the sop tree of optional join groups containing values + * not in the lexicon. + */ + sopTree = stb.pruneGroups(sopTree, groupsToPrune); + + /* + * If we have a filter in the root group (one that can be safely applied + * across the entire query) that we cannot translate into a native + * bigdata constraint, we can run it as a FilterIterator after the + * query has run natively. 
+ */ + final Collection<ValueExpr> sesameFilters = new LinkedList<ValueExpr>(); + + /* + * We need to prune Sesame filters that we cannot translate into native + * constraints (ones that require lexicon joins). + */ + final Collection<SOp> sopsToPrune = new LinkedList<SOp>(); + + /* + * Iterate through the sop tree and translate Sesame ValueExpr operators + * into bigdata IConstraint boperators. + */ + for (SOp sop : sopTree) { + final QueryModelNode op = sop.getOperator(); + if (op instanceof ValueExpr) { + final ValueExpr ve = (ValueExpr) op; + try { + final IConstraint bop = toConstraint(ve); + sop.setBOp(bop); + } catch (UnsupportedOperatorException ex) { + /* + * If we encounter a sesame filter (ValueExpr) that we + * cannot translate, we can safely wrap the entire query + * with a Sesame filter iterator to capture that + * untranslatable value expression. If we are not in the + * root group however, we risk applying the filter to the + * wrong context (for example a filter inside an optional + * join group cannot be applied universally to the entire + * solution). In this case we must punt. + */ + if (sop.getGroup() == SOpTreeBuilder.ROOT_GROUP_ID) { + sopsToPrune.add(sop); + sesameFilters.add(ve); + } else { + throw ex; + } + } + } + } + + /* + * Prunes the sop tree of untranslatable filters. + */ + sopTree = stb.pruneSOps(sopTree, sopsToPrune); + + /* + * Make sure we don't have free text searches searching outside + * their named graph scope. + */ + attachNamedGraphsFilterToSearches(sopTree); + + /* + * Gather variables required by Sesame outside of the query + * evaluation (projection and global sesame filters). + */ + final IVariable[] required = + gatherRequiredVariables(root, sesameFilters); + + final QueryEngine queryEngine = tripleSource.getSail().getQueryEngine(); + + final PipelineOp query; + { + /* + * Note: The ids are assigned using incrementAndGet() so ONE (1) is + * the first id that will be assigned when we pass in ZERO (0) as + * the initial state of the AtomicInteger. + */ + final AtomicInteger idFactory = new AtomicInteger(0); + + // Convert the step to a bigdata operator tree. + query = SOp2BOpUtility.convert(sopTree, idFactory, database, + queryEngine, queryHints); + + if (log.isInfoEnabled()) + log.info(query); + + } + + return _evaluateNatively(query, bs, queryEngine, sesameFilters); + + } + + protected CloseableIteration<BindingSet, QueryEvaluationException> + _evaluateNatively(final PipelineOp query, final BindingSet bs, + final QueryEngine queryEngine, + final Collection<ValueExpr> sesameConstraints) + throws QueryEvaluationException { + + try { + + final IRunningQuery runningQuery = queryEngine.eval(query); + + final IAsynchronousIterator<IBindingSet[]> it1 = + runningQuery.iterator(); + + final IChunkedOrderedIterator<IBindingSet> it2 = + new ChunkedWrappedIterator<IBindingSet>( + new Dechunkerator<IBindingSet>(it1)); + + CloseableIteration<BindingSet, QueryEvaluationException> result = + new Bigdata2Sesame2BindingSetIterator<QueryEvaluationException>( + new BigdataBindingSetResolverator(database, it2).start( + database.getExecutorService())); + + // Wait for the Future (checks for errors). 
+ runningQuery.get(); + + // use the basic filter iterator for remaining filters + if (sesameConstraints != null) { + for (ValueExpr ve : sesameConstraints) { + final Filter filter = new Filter(null, ve); + result = new FilterIterator(filter, result, this); + } + } + + return result; + + } catch (QueryEvaluationException ex) { + throw ex; + } catch (Exception ex) { + throw new QueryEvaluationException(ex); + } + + } + +// /** +// * This is the method that will attempt to take a top-level join or left +// * join and turn it into a native bigdata rule. The Sesame operators Join +// * and LeftJoin share only the common base class BinaryTupleOperator, but +// * other BinaryTupleOperators are not supported by this method. Other +// * specific types of BinaryTupleOperators will cause this method to throw +// * an exception. +// * <p> +// * This method will also turn a single top-level StatementPattern into a +// * rule with one predicate in it. +// * <p> +// * Note: As a pre-condition, the {@link Value}s in the query expression +// * MUST have been rewritten as {@link BigdataValue}s and their term +// * identifiers MUST have been resolved. Any term identifier that remains +// * {@link IRawTripleStore#NULL} is an indication that there is no entry for +// * that {@link Value} in the database. Since the JOINs are required (vs +// * OPTIONALs), that means that there is no solution for the JOINs and an +// * {@link EmptyIteration} is returned rather than evaluating the query. +// * +// * @param join +// * @return native bigdata rule +// * @throws UnsupportedOperatorException +// * this exception will be thrown if the Sesame join contains any +// * SPARQL language constructs that cannot be converted into +// * the bigdata native rule model +// * @throws QueryEvaluationException +// */ +// private IRule createNativeQueryOld(final TupleExpr join) +// throws UnsupportedOperatorException, +// QueryEvaluationException { +// +// if (!(join instanceof StatementPattern || +// join instanceof Join || join instanceof LeftJoin || +// join instanceof Filter)) { +// throw new AssertionError( +// "only StatementPattern, Join, and LeftJoin supported"); +// } +// +// // flattened collection of statement patterns nested within this join, +// // along with whether or not each one is optional +// final Map<StatementPattern, Boolean> stmtPatterns = +// new LinkedHashMap<StatementPattern, Boolean>(); +// // flattened collection of filters nested within this join +// final Collection<Filter> filters = new LinkedList<Filter>(); +// +// // will throw EncounteredUnknownTupleExprException if the join +// // contains something we don't handle yet +//// collectStatementPatterns(join, stmtPatterns, filters); +// +// if (false) { +// for (Map.Entry<StatementPattern, Boolean> entry : +// stmtPatterns.entrySet()) { +// log.debug(entry.getKey() + ", optional=" + entry.getValue()); +// } +// for (Filter filter : filters) { +// log.debug(filter.getCondition()); +// } +// } +// +// // generate tails +// Collection<IPredicate> tails = new LinkedList<IPredicate>(); +// // keep a list of free text searches for later to solve a named graphs +// // problem +// final Map<IPredicate, StatementPattern> searches = +// new HashMap<IPredicate, StatementPattern>(); +// for (Map.Entry<StatementPattern, Boolean> entry : stmtPatterns +// .entrySet()) { +// StatementPattern sp = entry.getKey(); +// boolean optional = entry.getValue(); +// IPredicate tail = toPredicate(sp, optional); +// // encountered a value not in the database lexicon +// if (tail == 
null) { +// if (log.isDebugEnabled()) { +// log.debug("could not generate tail for: " + sp); +// } +// if (optional) { +// // for optionals, just skip the tail +// continue; +// } else { +// // for non-optionals, skip the entire rule +// return null; +// } +// } +// if (tail.getAccessPathExpander() instanceof FreeTextSearchExpander) { +// searches.put(tail, sp); +// } +// tails.add(tail); +// } +// +// /* +// * When in quads mode, we need to go through the free text searches and +// * make sure that they are properly filtered for the dataset where +// * needed. Joins will take care of this, so we only need to add a filter +// * when a search variable does not appear in any other tails that are +// * non-optional. +// * +// * @todo Bryan seems to think this can be fixed with a DISTINCT JOIN +// * mechanism in the rule evaluation. +// */ +// if (database.isQuads() && dataset != null) { +// for (IPredicate search : searches.keySet()) { +// final Set<URI> graphs; +// StatementPattern sp = searches.get(search); +// switch (sp.getScope()) { +// case DEFAULT_CONTEXTS: { +// /* +// * Query against the RDF merge of zero or more source +// * graphs. +// */ +// graphs = dataset.getDefaultGraphs(); +// break; +// } +// case NAMED_CONTEXTS: { +// /* +// * Query against zero or more named graphs. +// */ +// graphs = dataset.getNamedGraphs(); +// break; +// } +// default: +// throw new AssertionError(); +// } +// if (graphs == null) { +// continue; +// } +// // why would we use a constant with a free text search??? +// if (search.get(0).isConstant()) { +// throw new AssertionError(); +// } +// // get ahold of the search variable +// com.bigdata.bop.Var searchVar = +// (com.bigdata.bop.Var) search.get(0); +// if (log.isDebugEnabled()) { +// log.debug(searchVar); +// } +// // start by assuming it needs filtering, guilty until proven +// // innocent +// boolean needsFilter = true; +// // check the other tails one by one +// for (IPredicate<ISPO> tail : tails) { +// IAccessPathExpander<ISPO> expander = +// tail.getAccessPathExpander(); +// // only concerned with non-optional tails that are not +// // themselves magic searches +// if (expander instanceof FreeTextSearchExpander +// || tail.isOptional()) { +// continue; +// } +// // see if the search variable appears in this tail +// boolean appears = false; +// for (int i = 0; i < tail.arity(); i++) { +// IVariableOrConstant term = tail.get(i); +// if (log.isDebugEnabled()) { +// log.debug(term); +// } +// if (term.equals(searchVar)) { +// appears = true; +// break; +// } +// } +// // if it appears, we don't need a filter +// if (appears) { +// needsFilter = false; +// break; +// } +// } +// // if it needs a filter, add it to the expander +// if (needsFilter) { +// if (log.isDebugEnabled()) { +// log.debug("needs filter: " + searchVar); +// } +// FreeTextSearchExpander expander = (FreeTextSearchExpander) +// search.getAccessPathExpander(); +// expander.addNamedGraphsFilter(graphs); +// } +// } +// } +// +// // generate constraints +// final Collection<IConstraint> constraints = +// new LinkedList<IConstraint>(); +// final Iterator<Filter> filterIt = filters.iterator(); +// while (filterIt.hasNext()) { +// final Filter filter = filterIt.next(); +// final IConstraint constraint = toConstraint(filter.getCondition()); +// if (constraint != null) { +// // remove if we are able to generate a native constraint for it +// if (log.isDebugEnabled()) { +// log.debug("able to generate a constraint: " + constraint); +// } +// filterIt.remove(); +// 
constraints.add(constraint); +// } +// } +// +// /* +// * FIXME Native slice, DISTINCT, etc. are all commented out for now. +// * Except for ORDER_BY, support exists for all of these features in the +// * native rules, but we need to separate the rewrite of the tupleExpr +// * and its evaluation in order to properly handle this stuff. +// */ +// IQueryOptions queryOptions = QueryOptions.NONE; +// // if (slice) { +// // if (!distinct && !union) { +// // final ISlice slice = new Slice(offset, limit); +// // queryOptions = new QueryOptions(false/* distinct */, +// // true/* stable */, null/* orderBy */, slice); +// // } +// // } else { +// // if (distinct && !union) { +// // queryOptions = QueryOptions.DISTINCT; +// // } +// // } +// +//// if (log.isDebugEnabled()) { +//// for (IPredicate<ISPO> tail : tails) { +//// IAccessPathExpander<ISPO> expander = tail.getAccessPathExpander(); +//// if (expander != null) { +//// IAccessPath<ISPO> accessPath = database.getSPORelation() +//// .getAccessPath(tail); +//// accessPath = expander.getAccessPath(accessPath); +//// IChunkedOrderedIterator<ISPO> it = accessPath.iterator(); +//// while (it.hasNext()) { +//// log.debug(it.next().toString(database)); +//// } +//// } +//// } +//// } +// +// /* +// * Collect a set of variables required beyond just the join (i.e. +// * aggregation, projection, filters, etc.) +// */ +// Set<String> required = new HashSet<String>(); +// +// try { +// +// QueryModelNode p = join; +// while (true) { +// p = p.getParentNode(); +// if (log.isDebugEnabled()) { +// log.debug(p.getClass()); +// } +// if (p instanceof UnaryTupleOperator) { +// required.addAll(collectVariables((UnaryTupleOperator) p)); +// } +// if (p instanceof QueryRoot) { +// break; +// } +// } +// +// if (filters.size() > 0) { +// for (Filter filter : filters) { +// required.addAll(collectVariables((UnaryTupleOperator) filter)); +// } +// } +// +// } catch (Exception ex) { +// throw new QueryEvaluationException(ex); +// } +// +// IVariable[] requiredVars = new IVariable[required.size()]; +// int i = 0; +// for (String v : required) { +// requiredVars[i++] = com.bigdata.bop.Var.var(v); +// } +// +// if (log.isDebugEnabled()) { +// log.debug("required binding names: " + Arrays.toString(requiredVars)); +// } +// +//// if (starJoins) { // database.isQuads() == false) { +//// if (log.isDebugEnabled()) { +//// log.debug("generating star joins"); +//// } +//// tails = generateStarJoins(tails); +//// } +// +// // generate native rule +// IRule rule = new Rule("nativeJoin", +// // @todo should serialize the query string here for the logs. +// null, // head +// tails.toArray(new IPredicate[tails.size()]), queryOptions, +// // constraints on the rule. +// constraints.size() > 0 ? 
constraints +// .toArray(new IConstraint[constraints.size()]) : null, +// null/* constants */, null/* taskFactory */, requiredVars); +// +// if (BigdataStatics.debug) { +// System.err.println(join.toString()); +// System.err.println(rule.toString()); +// } +// +// // we have filters that we could not translate natively +// if (filters.size() > 0) { +// if (log.isDebugEnabled()) { +// log.debug("could not translate " + filters.size() +// + " filters into native constraints:"); +// for (Filter filter : filters) { +// log.debug("\n" + filter.getCondition()); +// } +// } +// // use the basic filter iterator for remaining filters +//// rule = new ProxyRuleWithSesameFilters(rule, filters); +// } +// +// return rule; +// +// } + + private void attachNamedGraphsFilterToSearches(final SOpTree sopTree) { + + /* + * When in quads mode, we need to go through the free text searches and + * make sure that they are properly filtered for the dataset where + * needed. Joins will take care of this, so we only need to add a filter + * when a search variable does not appear in any other tails that are + * non-optional. + * + * @todo Bryan seems to think this can be fixed with a DISTINCT JOIN + * mechanism in the rule evaluation. + */ + if (database.isQuads() && dataset != null) { +// for (IPredicate search : searches.keySet()) { + for (SOp sop : sopTree) { + final QueryModelNode op = sop.getOperator(); + if (!(op instanceof StatementPattern)) { + continue; + } + final StatementPattern sp = (StatementPattern) op; + final IPredicate pred = (IPredicate) sop.getBOp(); + if (!(pred.getAccessPat... [truncated message content] |
From: <tho...@us...> - 2011-01-11 01:37:24
Revision: 4072
          http://bigdata.svn.sourceforge.net/bigdata/?rev=4072&view=rev
Author:   thompsonbry
Date:     2011-01-11 01:37:17 +0000 (Tue, 11 Jan 2011)

Log Message:
-----------
Removed the CONTROLLER annotation for the OptionalJoinGroup operator.

Modified Paths:
--------------
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/controller/TestOptionalJoinGroup.java

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java	2011-01-10 22:09:06 UTC (rev 4071)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java	2011-01-11 01:37:17 UTC (rev 4072)
@@ -129,8 +129,8 @@

         getRequiredProperty(Annotations.SUBQUERY);

-        if (!getProperty(Annotations.CONTROLLER, Annotations.DEFAULT_CONTROLLER))
-            throw new IllegalArgumentException(Annotations.CONTROLLER);
+//        if (!getProperty(Annotations.CONTROLLER, Annotations.DEFAULT_CONTROLLER))
+//            throw new IllegalArgumentException(Annotations.CONTROLLER);

 //        // The id of this operator (if any).
 //        final Integer thisId = (Integer)getProperty(Annotations.BOP_ID);

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/controller/TestOptionalJoinGroup.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/controller/TestOptionalJoinGroup.java	2011-01-10 22:09:06 UTC (rev 4071)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/controller/TestOptionalJoinGroup.java	2011-01-11 01:37:17 UTC (rev 4072)
@@ -345,8 +345,8 @@
         final PipelineOp joinGroup1Op = new OptionalJoinGroup(new BOp[]{join1Op},
                 new NV(Predicate.Annotations.BOP_ID, joinGroup1),//
 //                new NV(PipelineOp.Annotations.CONDITIONAL_GROUP, joinGroup1),//
-                new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery),//
-                new NV(BOp.Annotations.CONTROLLER,true)//
+                new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery)//
+//                , new NV(BOp.Annotations.CONTROLLER,true)//
 //                new NV(BOp.Annotations.EVALUATION_CONTEXT,
 //                        BOpEvaluationContext.CONTROLLER)//
 //                // join is optional.
@@ -610,8 +610,8 @@
         final PipelineOp joinGroup1Op = new OptionalJoinGroup(new BOp[]{join1Op},
                 new NV(Predicate.Annotations.BOP_ID, joinGroup1),//
 //                new NV(PipelineOp.Annotations.CONDITIONAL_GROUP, joinGroup1),//
-                new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery),//
-                new NV(BOp.Annotations.CONTROLLER,true)//
+                new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery)//
+//                new NV(BOp.Annotations.CONTROLLER,true)//
 //                new NV(BOp.Annotations.EVALUATION_CONTEXT,
 //                        BOpEvaluationContext.CONTROLLER)//
 //                // join is optional.
@@ -871,8 +871,8 @@
         final PipelineOp joinGroup1Op = new OptionalJoinGroup(new BOp[]{condOp},
                 new NV(Predicate.Annotations.BOP_ID, joinGroup1),//
 //                new NV(PipelineOp.Annotations.CONDITIONAL_GROUP, joinGroup1),//
-                new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery),//
-                new NV(BOp.Annotations.CONTROLLER,true)//
+                new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery)//
+//                new NV(BOp.Annotations.CONTROLLER,true)//
 //                new NV(BOp.Annotations.EVALUATION_CONTEXT,
 //                        BOpEvaluationContext.CONTROLLER)//
 //                // join is optional.
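After r4072 the group operator is declared without the CONTROLLER annotation, as the updated tests show. A minimal sketch of the new construction (join1Op, joinGroup1 and subQuery stand in for the values built earlier in each test):

    // Post-r4072: only the bop id and the SUBQUERY annotation are needed;
    // the CONTROLLER annotation is neither required nor validated.
    final PipelineOp joinGroup1Op = new OptionalJoinGroup(
            new BOp[] { join1Op },
            new NV(Predicate.Annotations.BOP_ID, joinGroup1),
            new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery));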
From: <tho...@us...> - 2011-01-10 22:09:12
Revision: 4071
          http://bigdata.svn.sourceforge.net/bigdata/?rev=4071&view=rev
Author:   thompsonbry
Date:     2011-01-10 22:09:06 +0000 (Mon, 10 Jan 2011)

Log Message:
-----------
Added 'optional' annotation to OptionalJoinGroup. This operator should perhaps be renamed 'SubqueryOp'.

Modified Paths:
--------------
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java	2011-01-09 21:21:38 UTC (rev 4070)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/controller/OptionalJoinGroup.java	2011-01-10 22:09:06 UTC (rev 4071)
@@ -78,6 +78,15 @@
         String SUBQUERY = OptionalJoinGroup.class.getName() + ".subquery";

         /**
+         * When <code>true</code> the subquery has optional semantics (if the
+         * subquery fails, the original binding set will be passed along to the
+         * downstream sink anyway).
+         */
+        String OPTIONAL = OptionalJoinGroup.class.getName() + ".optional";
+
+        boolean DEFAULT_OPTIONAL = true;
+
+        /**
          * The maximum parallelism with which the subqueries will be evaluated
          * (default {@value #DEFAULT_MAX_PARALLEL}).
          */
@@ -163,6 +172,7 @@
         private final BOpContext<IBindingSet> context;
 //        private final List<FutureTask<IRunningQuery>> tasks = new LinkedList<FutureTask<IRunningQuery>>();
 //        private final CountDownLatch latch;
+        private final boolean optional;
         private final int nparallel;
         private final PipelineOp subquery;
         private final Executor executor;
@@ -179,6 +189,9 @@

             this.context = context;

+            this.optional = controllerOp.getProperty(Annotations.OPTIONAL,
+                    Annotations.DEFAULT_OPTIONAL);
+
             this.nparallel = controllerOp.getProperty(Annotations.MAX_PARALLEL,
                     Annotations.DEFAULT_MAX_PARALLEL);

@@ -385,7 +398,7 @@
                 // wait for the subquery.
                 runningQuery.get();

-                if (ncopied == 0L) {
+                if (ncopied == 0L && optional) {

                     /*
                      * Since there were no solutions for the subquery, copy
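Since DEFAULT_OPTIONAL is true, existing callers keep the old behavior and the annotation only matters when set to false. A minimal sketch of opting out (the bop id and the join1Op/subQuery operands are hypothetical):

    // OPTIONAL defaults to true. Setting it false gives the subquery
    // required-join semantics: when the subquery produces no solutions
    // (ncopied == 0L), the source binding set is dropped rather than
    // copied to the downstream sink.
    final PipelineOp group = new OptionalJoinGroup(
            new BOp[] { join1Op },
            new NV(Predicate.Annotations.BOP_ID, 10), // hypothetical id
            new NV(OptionalJoinGroup.Annotations.SUBQUERY, subQuery),
            new NV(OptionalJoinGroup.Annotations.OPTIONAL, false));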
From: <tho...@us...> - 2011-01-09 21:21:44
Revision: 4070
          http://bigdata.svn.sourceforge.net/bigdata/?rev=4070&view=rev
Author:   thompsonbry
Date:     2011-01-09 21:21:38 +0000 (Sun, 09 Jan 2011)

Log Message:
-----------
Elevated the journal.ha tests to a higher level and modified things so that the RWStore test suite is run correctly.

Modified Paths:
--------------
    branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/TestAll.java
    branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java
    branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestAll.java

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/TestAll.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/TestAll.java	2011-01-09 20:58:02 UTC (rev 4069)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/TestAll.java	2011-01-09 21:21:38 UTC (rev 4070)
@@ -101,6 +101,7 @@
         // Note: this has a dependency on the quorum package.
         suite.addTest(com.bigdata.io.writecache.TestAll.suite());
         suite.addTest( com.bigdata.journal.TestAll.suite() );
+        suite.addTest( com.bigdata.journal.ha.TestAll.suite() );
         suite.addTest( com.bigdata.resources.TestAll.suite() );
         suite.addTest( com.bigdata.mdi.TestAll.suite() );
         suite.addTest( com.bigdata.service.TestAll.suite() );

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java	2011-01-09 20:58:02 UTC (rev 4069)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java	2011-01-09 21:21:38 UTC (rev 4070)
@@ -140,13 +140,15 @@

         suite.addTest( com.bigdata.rwstore.TestAll.suite() );

-        /*
-         * High Availability test suite.
-         *
-         * Note: There is a separate test suite for DataService high
-         * availability and for the zookeeper HA integration.
-         */
-        suite.addTest(com.bigdata.journal.ha.TestAll.suite());
+//        /* @todo This has been moved up to the top-level to help
+//         * distinguish HA related build errors from Journal build errors.
+//         *
+//         * High Availability test suite.
+//         *
+//         * Note: There is a separate test suite for DataService high
+//         * availability and for the zookeeper HA integration.
+//         */
+//        suite.addTest(com.bigdata.journal.ha.TestAll.suite());

         return suite;

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestAll.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestAll.java	2011-01-09 20:58:02 UTC (rev 4069)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestAll.java	2011-01-09 21:21:38 UTC (rev 4070)
@@ -61,7 +61,7 @@

         final TestSuite suite = new TestSuite("r/w store");

-        suite.addTestSuite(com.bigdata.rwstore.TestRWJournal.class);
+        suite.addTest(com.bigdata.rwstore.TestRWJournal.suite());

         return suite;
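The rwstore change is easy to miss: in JUnit 3, addTestSuite(SomeClass.class) builds a suite reflectively from the class's public test* methods and bypasses its static suite() factory, so any parameterization that factory performs (for TestRWJournal, assembling its nested suites) is silently skipped. A sketch of the distinction, assuming plain JUnit 3 semantics:

    import junit.framework.TestSuite;

    final TestSuite suite = new TestSuite("r/w store");

    // Reflects over TestRWJournal's own test* methods only; whatever its
    // static suite() factory assembles is never consulted.
    //suite.addTestSuite(com.bigdata.rwstore.TestRWJournal.class);

    // Invokes the static suite() factory, so the full suite that
    // TestRWJournal assembles is what actually runs.
    suite.addTest(com.bigdata.rwstore.TestRWJournal.suite());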
From: <tho...@us...> - 2011-01-09 20:58:16
Revision: 4069 http://bigdata.svn.sourceforge.net/bigdata/?rev=4069&view=rev Author: thompsonbry Date: 2011-01-09 20:58:02 +0000 (Sun, 09 Jan 2011) Log Message: ----------- Merge JOURNAL_HA_BRANCH to QUADS_QUERY_BRANCH [2601:4061]. A summary of known test failures follows. I have not yet tested quads query against the bigdata federation (that will require a checkout on a machine with more resources). My focus in creating this summary was to verify the branch-to-branch merge. For the most part, it appears that test failures in the per-merge version of the branch were pre-existing failures in either the JOURNAL_HA_BRANCH or the QUADS_QUERY_BRANCH. Notes appear beneath the test suite to which they apply. When there are no comments, the test suite ran without any failures or errors. suite.addTest( com.bigdata.cache.TestAll.suite() ); TestHardReferenceGlobalLRURecycler#test_concurrentOperations() TestHardReferenceGlobalLRURecyclerWithExplicitDeleteRequired#test_concurrentOperations() suite.addTest( com.bigdata.io.TestAll.suite() ); TestFileChannel#test_transferAllFrom() - hangs under OSX. suite.addTest( com.bigdata.net.TestAll.suite() ); suite.addTest( com.bigdata.config.TestAll.suite() ); suite.addTest( com.bigdata.util.TestAll.suite() ); suite.addTest( com.bigdata.util.concurrent.TestAll.suite() ); 1 failure (before merge under OS X). Ok under Windows after merge. suite.addTest( com.bigdata.striterator.TestAll.suite() ); suite.addTest( com.bigdata.counters.TestAll.suite() ); suite.addTest( com.bigdata.rawstore.TestAll.suite() ); suite.addTest( com.bigdata.btree.TestAll.suite() ); suite.addTest( com.bigdata.concurrent.TestAll.suite() ); suite.addTest( com.bigdata.quorum.TestAll.suite() ); suite.addTest( com.bigdata.ha.TestAll.suite() ); // Note: this has a dependency on the quorum package. suite.addTest(com.bigdata.io.writecache.TestAll.suite()); suite.addTest( com.bigdata.journal.TestAll.suite() ); This failure was also present in the JOURNAL_HA_BRANCH: java.lang.IllegalArgumentException: The commit counter must be greter than zero if there is a commit record: commitRecordAddr=50331651, but commitCounter=0 at com.bigdata.journal.RootBlockView.<init>(RootBlockView.java:471) at com.bigdata.journal.TestRootBlockView.test_ctor_correctRejection(TestRootBlockView.java:404) suite.addTest( com.bigdata.journal.ha.TestAll.suite() ); This gets a lot of test errors in the quads branch (post-merge of course). Those errors are also present in the JOURNAL_HA_BRANCH. suite.addTest( com.bigdata.resources.TestAll.suite() ); suite.addTest( com.bigdata.relation.TestAll.suite() ); suite.addTest( com.bigdata.bop.TestAll.suite() ); suite.addTest( com.bigdata.relation.rule.eval.TestAll.suite() ); suite.addTest( com.bigdata.mdi.TestAll.suite() ); Note: This test suite is now empty - the tests are now invoked from the service test suite (for the HA branch as well). suite.addTest( com.bigdata.service.TestAll.suite() ); - Pre-merge: Takes a long time to run under OSX (2000s, 2 errors; but 280s and 2 failures 1 error on the next CI run (#4)). When run from eclipse, only TestMasterTimeoutIdleTask#test_idleTimeout_LT_chunkTimeout() fails. However, it fails repeatably under OSX (before merge) with assertion error at line #473. However, running under ant it might be running additional tests which interact with zk or jini (verify this). - Post-merge: TestMasterTask#test_writeStartStop2() fails once (passes on retry so this is one of the stochastic problems with that test suite). No other errors. 
suite.addTest( com.bigdata.bop.fed.TestAll.suite() ); - The only failures are "test_something()" methods. suite.addTest( com.bigdata.sparse.TestAll.suite() ); suite.addTest( com.bigdata.search.TestAll.suite() ); suite.addTest( com.bigdata.bfs.TestAll.suite() ); TestFileMetadataIndex#test_create_update() - fail. TestFileMetadataIndex#test_delete01() - fail TestRangeScan#test_rangeScan() - write test TestRangeDelete#test_rangeDelete() - write test // suite.addTest( com.bigdata.service.mapReduce.TestAll.suite() ); // Jini integration suite.addTest(com.bigdata.jini.TestAll.suite()); - Not tested under eclipse. // RDF suite.addTest(com.bigdata.rdf.TestAll.suite()); - 8 failures before merge under OSX CI build. suite.addTest(com.bigdata.rdf.sail.TestAll.suite()); There are some failures here which are related to the inlining work which Mike is currently performing and to the lack of a port from the JOURNAL_HA_BRANCH of the magic search integration into the SAIL. The failures are summarized below. TestNamedGraphs - leaks journal files (maybe testSearchQuery?). TestSearchQuery#testWithMetadata() - leaks journal files. TestTempTripleStore:: TestSPOStarJoin#testStarJoin1() fails. TestSPOStarJoin#testStarJoin2() fails. TestLocalTripleStore:: TestSPOStarJoin#testStarJoin1() fails. TestSPOStarJoin#testStarJoin2() fails. TestLocalTripleStoreWithoutStatementIdentifiers:: TestSPOStarJoin#testStarJoin1() fails. TestSPOStarJoin#testStarJoin2() fails. TestBigdataSailWithQuads:: TestNamedGraphs#testSearchQuery() fails. TestSearchQuery#testWithMetadata() fails. TestBigdataEvaluationStrategyImpl#test_free_text_search() fails. BigdataConnectionTest#testPreparedTupleQuery2() fails. BigdataSparqlTest#open-cmp-01 fails. BigdataSparqlTest#open-cmp-02 fails. TestBigdataSailWithoutSids:: TestSearchQuery#testWithMetadata() fails. TestBigdataEvaluationStrategyImpl#test_free_text_search() fails. TestBigdataSailWithSids:: TestSearchQuery#testWithMetadata() fails. TestBigdataEvaluationStrategyImpl#test_free_text_search() fails. TestBigdataSailWithSidsWithoutInlining:: TestSearchQuery#testWithMetadata() fails. TestBigdataEvaluationStrategyImpl#test_free_text_search() fails. Next steps are to clean up CI against the post-merge version of the branch and to bring forward some remaining features from the JOURNAL_HA_BRANCH which are related to the SAIL. - MikeP hand reconcile: BigdataEvaluationStrategy2 => BigdataEvaluationStrategy (magic search feature port). - MikeP update IsInline and IsLiteral to the BOp model. 
Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/.project branches/QUADS_QUERY_BRANCH/bigdata/src/architecture/mergePriority.xls branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/AbstractBTree.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BTree.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BytesUtil.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexMetadata.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentCheckpoint.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/Node.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilder.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/keys/KeyBuilder.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/BufferMode.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/CommitRecord.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/CommitRecordIndex.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ConcurrencyManager.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DirectBufferStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DiskBackedBufferStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DumpJournal.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/FileMetadata.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IAtomicStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IRootBlockView.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ITransactionService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/JournalTransactionService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/Options.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockView.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryRawStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TransientBufferStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rawstore/AbstractRawStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rawstore/IAddressManager.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rawstore/IRawStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rawstore/WormAddressManager.java 
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/AbstractResource.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/pipeline/JoinTaskFactoryTask.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedOutputStream.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/IStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/PSOutputStream.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/StorageTerminalError.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/Hit.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/Hiterator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/ReadIndexTask.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/TermFrequencyData.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/TermMetadata.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/TokenBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/ChecksumError.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/ChecksumUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentAddressManager.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentCheckpoint.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/counters/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractBufferStrategyTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractIndexManagerTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractInterruptsTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractJournalTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractMRMWTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractMROWTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractRestartSafeTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestConcurrentTx.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestConcurrentUnisolatedIndices.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestAbort.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestCommitHistory.java 
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestConcurrentJournal.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestJournalBasics.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestJournalShutdown.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestRootBlockView.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestWORMStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rawstore/AbstractRawStoreTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/relation/rule/AbstractRuleTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/relation/rule/TestRule.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/search/TestKeyBuilder.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/search/TestPrefixSearch.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/util/TestChecksumUtility.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/BigdataZooDefs.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/ManageLogicalServiceTask.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniCoreServicesConfiguration.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/ZookeeperClientConfig.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/process/JiniCoreServicesProcessHelper.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/zookeeper/AbstractZNodeConditionWatcher.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/zookeeper/DumpZookeeper.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/zookeeper/HierarchicalZNodeWatcher.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/zookeeper/UnknownChildrenWatcher.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/zookeeper/ZLockImpl.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/zookeeper/ZooKeeperAccessor.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/jini/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/zookeeper/AbstractZooTestCase.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/zookeeper/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/zookeeper/TestZLockImpl.java branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/RWStore.properties branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/build.properties branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/build.xml branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/RWStore.properties branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/build.properties branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/build.xml branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/ClosureStats.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/Justification.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/DefaultExtensionFactory.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IExtensionFactory.java 
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSD.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InlineEQ.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/magic/IRISUtils.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/LoadStats.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rules/AbstractRuleDistinctTermScan.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/DefaultGraphSolutionExpander.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPORelation.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractLocalTripleStore.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BD.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/vocab/Vocabulary.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/internal/SampleExtensionFactory.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestEncodeDecodeKeys.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TaskATest.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TestMetrics.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestJustifications.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestRuleFastClosure_3_5_6_7_9.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestSlice.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPORelation.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/FreeTextSearchExpander.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNamedGraphs.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSearchQuery.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/stress/testSimpleLubm.xml branches/QUADS_QUERY_BRANCH/build.properties branches/QUADS_QUERY_BRANCH/build.xml Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/architecture/RWStore.xls branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/counters/striped/ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/counters/striped/StripedCounters.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/HACommitGlue.java 
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/HAGlue.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/HAGlueBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/HAReadGlue.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumCommit.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumRead.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumReadImpl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/pipeline/ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/pipeline/HAWriteMessageBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/pipeline/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/AllocationData.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/ObjectSocketChannelStream.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/messages/ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/IWriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DeleteBlockCommitter.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RWAddressManager.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockCommitter.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ha/ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ha/HAWriteMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ha/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/AbstractQuorumClient.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/AbstractQuorumMember.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/AsynchronousQuorumCloseException.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/Quorum.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumActor.java 
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumClient.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumEvent.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumEventEnum.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumException.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumListener.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumMember.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumStateChangeListener.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumStateChangeListenerBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumWatcher.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/rule/Binding.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/rule/IBinding.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/DirectFixedAllocator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/IAllocationContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/PhysicalAddressResolutionException.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/StorageStats.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/DefaultAnalyzerFactory.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/IAnalyzerFactory.java branches/QUADS_QUERY_BRANCH/bigdata/src/samples/com/bigdata/samples/btree/JournalReadOnlyTxExample.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/counters/striped/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/counters/striped/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/counters/striped/TestStripedCounters.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestCase3.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/messages/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestWORMStrategyNoCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestWORMStrategyOneCacheBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/HABranch.txt branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/TestAll.java 
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/TestHAWORMStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/TestHAWritePipeline.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/TestJournalHA.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/force-vs-sync.txt branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/AbstractQuorumTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/TestHA3QuorumSemantics.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/TestMockQuorumFixture.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/TestSingletonQuorumSemantics.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/ha/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/zk/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/journal/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/journal/ha/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/journal/ha/zk/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/OrderedSetDifference.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/QuorumPipelineState.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/QuorumServiceState.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/QuorumTokenState.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/UnorderedSetDifference.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorum.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/journal/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/journal/ha/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/journal/ha/zk/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/journal/ha/zk/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/AbstractZkQuorumTestCase.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/MockQuorumMember.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/MockServiceRegistrar.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/TestSetDifference.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/TestZkHA3QuorumSemantics.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/TestZkQuorum.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/TestZkSingletonQuorumSemantics.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/zookeeper/TestEphemeralSemantics.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/DateTimeExtension.java 
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsInline.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsLiteral.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DumpStore.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/Q14Test.java Removed Paths: ------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/counters/striped/StripedCounters.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/HACommitGlue.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/HAGlue.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/HAGlueBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/HAReadGlue.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumCommit.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumRead.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumReadImpl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/pipeline/ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/pipeline/HAWriteMessageBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/pipeline/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/IWriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/WriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/WriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/IWriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ha/HAWriteMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ha/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/AbstractQuorumClient.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/AbstractQuorumMember.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/AsynchronousQuorumCloseException.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/Quorum.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumActor.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumClient.java 
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumEvent.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumEventEnum.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumException.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumListener.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumMember.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumStateChangeListener.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumStateChangeListenerBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/QuorumWatcher.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/quorum/package.html branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/Config.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/DirectOutputStream.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/ICommitCallback.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/LockFile.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/PSInputStream.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/WriteBlock.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/service/IWritePipeline.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/counters/striped/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/counters/striped/TestStripedCounters.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestWriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestWriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ReplicatedStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ReplicatedStoreService.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/HABranch.txt branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/TestHAWORMStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/TestHAWritePipeline.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/TestJournalHA.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/force-vs-sync.txt branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/AbstractQuorumTestCase.java 
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/TestHA3QuorumSemantics.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/TestMockQuorumFixture.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/TestSingletonQuorumSemantics.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/ha/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/quorum/zk/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/journal/ha/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/journal/ha/zk/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/OrderedSetDifference.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/QuorumPipelineState.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/QuorumServiceState.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/QuorumTokenState.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/UnorderedSetDifference.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorum.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/zookeeper/ZNodeLockWatcher.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/journal/ha/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/journal/ha/zk/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/journal/ha/zk/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/AbstractZkQuorumTestCase.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/MockQuorumMember.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/MockServiceRegistrar.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/TestSetDifference.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/TestZkHA3QuorumSemantics.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/TestZkQuorum.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/quorum/zk/TestZkSingletonQuorumSemantics.java Property Changed: ---------------- branches/QUADS_QUERY_BRANCH/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/attr/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/disco/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/util/config/ branches/QUADS_QUERY_BRANCH/bigdata-perf/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/lib/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/java/ 
branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/java/benchmark/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/java/benchmark/generator/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/java/benchmark/model/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/java/benchmark/qualification/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/java/benchmark/serializer/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/java/benchmark/testdriver/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/java/benchmark/tools/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/java/benchmark/vocabulary/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/resources/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/resources/bsbm-data/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/resources/bsbm-data/queries/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/resources/logging/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/test/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/test/benchmark/ branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/test/benchmark/bigdata/ branches/QUADS_QUERY_BRANCH/bigdata-perf/btc/ branches/QUADS_QUERY_BRANCH/bigdata-perf/btc/src/resources/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/LEGAL/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/lib/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/uba/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/api/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/bigdata/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/resources/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/resources/answers (U1)/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/resources/config/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/resources/logging/ branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm/src/resources/scripts/ branches/QUADS_QUERY_BRANCH/bigdata-perf/uniprot/ branches/QUADS_QUERY_BRANCH/bigdata-perf/uniprot/src/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/relation/rule/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/samples/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/samples/com/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/samples/com/bigdata/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/samples/com/bigdata/rdf/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/samples/com/bigdata/rdf/internal/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/relation/ 
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/relation/rule/ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/QUADS_QUERY_BRANCH/dsi-utils/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/java/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/java/it/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/java/it/unimi/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/java/it/unimi/dsi/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/java/it/unimi/dsi/compression/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/java/it/unimi/dsi/io/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/java/it/unimi/dsi/util/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/test/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/test/it/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/test/it/unimi/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/test/it/unimi/dsi/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/test/it/unimi/dsi/io/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/test/it/unimi/dsi/util/ branches/QUADS_QUERY_BRANCH/osgi/ branches/QUADS_QUERY_BRANCH/src/resources/bin/config/ branches/QUADS_QUERY_BRANCH/src/resources/config/

Property changes on: branches/QUADS_QUERY_BRANCH
___________________________________________________________________
Modified: svn:mergeinfo
   - /branches/BTREE_BUFFER_BRANCH:2004-2045
/branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782
/branches/LEXICON_REFACTOR_BRANCH:2633-3304
/branches/bugfix-btm:2594-3237
/branches/dev-btm:2574-2730
/branches/fko:3150-3194
/trunk:3659-4061
   + /branches/BTREE_BUFFER_BRANCH:2004-2045
/branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782
/branches/JOURNAL_HA_BRANCH:2596-4066
/branches/LEXICON_REFACTOR_BRANCH:2633-3304
/branches/bugfix-btm:2594-3237
/branches/dev-btm:2574-2730
/branches/fko:3150-3194
/trunk:3392-3437,3656-4061

Modified: branches/QUADS_QUERY_BRANCH/.project
===================================================================
--- branches/QUADS_QUERY_BRANCH/.project	2011-01-09 15:38:34 UTC (rev 4068)
+++ branches/QUADS_QUERY_BRANCH/.project	2011-01-09 20:58:02 UTC (rev 4069)
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-	<name>bigdata</name>
+	<name>bigdata-quads-clean-for-merge</name>
 	<comment></comment>
 	<projects>
 	</projects>

Copied: branches/QUADS_QUERY_BRANCH/bigdata/src/architecture/RWStore.xls (from rev 4066, branches/JOURNAL_HA_BRANCH/bigdata/src/architecture/RWStore.xls)
===================================================================
(Binary files differ)

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/architecture/mergePriority.xls
===================================================================
(Binary files differ)

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/AbstractBTree.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/AbstractBTree.java	2011-01-09 15:38:34 UTC (rev 4068)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/AbstractBTree.java	2011-01-09 20:58:02 UTC (rev 4069)
@@ -3350,6 +3350,8 @@
      * @todo Actually, I think that this is just a fence post in ringbuffer
      * beforeOffer() method and the code might work without the synchronized
      * block if the fence post was fixed.
+     * 
+     * @see https://sourceforge.net/apps/trac/bigdata/ticket/201
      */
         synchronized (this) {
@@ -3682,6 +3684,7 @@
         // write the serialized node or leaf onto the store.
         final long addr;
+        final long oldAddr;
         {
             final long begin = System.nanoTime();
@@ -3691,7 +3694,9 @@
             // now we have a new address, delete previous identity if any
             if (node.isPersistent()) {
-                store.delete(node.getIdentity());
+                oldAddr = node.getIdentity();
+            } else {
+                oldAddr = 0;
             }
             btreeCounters.writeNanos += System.nanoTime() - begin;
@@ -3708,6 +3713,13 @@
          */
         node.setIdentity(addr);
+        if (oldAddr != 0L) {
+            if (storeCache!=null) {
+                // remove from cache.
+                storeCache.remove(oldAddr);
+            }
+            store.delete(oldAddr);
+        }
         node.setDirty(false);
@@ -3821,9 +3833,10 @@
             assert tmp.position() == 0;
-            assert tmp.limit() == store.getByteCount(addr) : "limit="
-                    + tmp.limit() + ", byteCount(addr)="
-                    + store.getByteCount(addr)+", addr="+store.toString(addr);
+            // Note: This assertion is invalidated when checksums are inlined in the store records.
+//            assert tmp.limit() == store.getByteCount(addr) : "limit="
+//                    + tmp.limit() + ", byteCount(addr)="
+//                    + store.getByteCount(addr)+", addr="+store.toString(addr);
             btreeCounters.readNanos.addAndGet( System.nanoTime() - begin );

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BTree.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BTree.java	2011-01-09 15:38:34 UTC (rev 4068)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BTree.java	2011-01-09 20:58:02 UTC (rev 4069)
@@ -39,6 +39,7 @@
 import com.bigdata.journal.ICommitter;
 import com.bigdata.journal.IIndexManager;
 import com.bigdata.journal.Name2Addr;
+import com.bigdata.journal.RWStrategy;
 import com.bigdata.journal.Name2Addr.Entry;
 import com.bigdata.mdi.IResourceMetadata;
 import com.bigdata.mdi.JournalMetadata;
@@ -1173,8 +1174,21 @@
         assertNotReadOnly();
-        if (getIndexMetadata().getDeleteMarkers()) {
-
+        /*
+         * FIXME Per https://sourceforge.net/apps/trac/bigdata/ticket/221, we
+         * should special case this for the RWStore when delete markers are not
+         * enabled and just issue deletes against each node and leave in the
+         * BTree. This could be done using a post-order traversal of the nodes
+         * and leaves such that the parent is not removed from the store until
+         * its children have been removed. The deletes should be low-level
+         * IRawStore#delete(addr) invocations without maintenance to the B+Tree
+         * data structures. Afterwards replaceRootWithEmptyLeaf() should be
+         * invoked to discard the hard reference ring buffer and associate a new
+         * root leaf with the B+Tree.
+         */
+        if (getIndexMetadata().getDeleteMarkers()
+                || getStore() instanceof RWStrategy) {
+
             /*
              * Write deletion markers for each non-deleted entry. When the
              * transaction commits, those delete markers will have to validate

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BytesUtil.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BytesUtil.java	2011-01-09 15:38:34 UTC (rev 4068)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BytesUtil.java	2011-01-09 20:58:02 UTC (rev 4069)
@@ -27,6 +27,7 @@
 import it.unimi.dsi.io.InputBitStream;
 import it.unimi.dsi.io.OutputBitStream;
+import java.nio.ByteBuffer;
 import java.util.Comparator;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -1019,4 +1020,81 @@
     static final private Pattern PATTERN_BYTE_COUNT = Pattern.compile(
             "([0-9]+)(k|kb|m|mb|g|gb)?", Pattern.CASE_INSENSITIVE);
+    /**
+     * Return a byte[] having the data in the {@link ByteBuffer} from the
+     * {@link ByteBuffer#position()} to the {@link ByteBuffer#limit()}. The
+     * position, limit, and mark are not affected by this operation. When the
+     * {@link ByteBuffer} has a backing array, the array offset is ZERO (0), and
+     * the {@link ByteBuffer#limit()} is equal to the
+     * {@link ByteBuffer#capacity()} then the backing array is returned.
+     * Otherwise, a new byte[] is allocated and the data are copied into that
+     * byte[], which is then returned.
+     * 
+     * @param b
+     *            The {@link ByteBuffer}.
+     * 
+     * @return The byte[].
+     */
+    static public byte[] toArray(final ByteBuffer b) {
+
+        return toArray(b, false/* forceCopy */);
+
+    }
+
+    /**
+     * Return a byte[] having the data in the {@link ByteBuffer} from the
+     * {@link ByteBuffer#position()} to the {@link ByteBuffer#limit()}. The
+     * position, limit, and mark are not affected by this operation.
+     * <p>
+     * Under certain circumstances it is possible and may be desirable to return
+     * the backing {@link ByteBuffer#array}. This behavior is enabled by
+     * <code>forceCopy := false</code>.
+     * <p>
+     * It is possible to return the backing byte[] when the {@link ByteBuffer}
+     * has a backing array, the array offset is ZERO (0), and the
+     * {@link ByteBuffer#limit()} is equal to the {@link ByteBuffer#capacity()}
+     * then the backing array is returned. Otherwise, a new byte[] must be
+     * allocated, and the data are copied into that byte[], which may then be
+     * returned.
+     * 
+     * @param b
+     *            The {@link ByteBuffer}.
+     * @param forceCopy
+     *            When <code>false</code>, the backing array will be returned if
+     *            possible.
+     * 
+     * @return The byte[].
+     */
+    static public byte[] toArray(final ByteBuffer b, final boolean forceCopy) {
+
+        if (b.hasArray() && b.arrayOffset() == 0 && b.position() == 0) {
+
+//            && b.limit() == b.capacity()
+
+            final byte[] a = b.array();
+
+            if (a.length == b.limit()) {
+
+                return a;
+
+            }
+
+        }
+
+        /*
+         * Copy the data into a byte[] using a read-only view on the buffer so
+         * that we do not mess with its position, mark, or limit.
+         */
+        final ByteBuffer tmp = b.asReadOnlyBuffer();
+
+        final int len = tmp.remaining();
+
+        final byte[] a = new byte[len];
+
+        tmp.get(a);
+
+        return a;
+
+    }
+
 }

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexMetadata.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexMetadata.java	2011-01-09 15:38:34 UTC (rev 4068)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexMetadata.java	2011-01-09 20:58:02 UTC (rev 4069)
@@ -271,7 +271,7 @@
     /**
      * A reasonable maximum branching factor for a {@link BTree}.
      */
-    int MAX_BTREE_BRANCHING_FACTOR = 1024;
+    int MAX_BTREE_BRANCHING_FACTOR = 4196;

     /**
      * A reasonable maximum branching factor for an {@link IndexSegment}.

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java	2011-01-09 15:38:34 UTC (rev 4068)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java	2011-01-09 20:58:02 UTC (rev 4069)
@@ -59,7 +59,7 @@
 import com.bigdata.io.FileChannelUtility;
 import com.bigdata.io.NOPReopener;
 import com.bigdata.io.SerializerUtil;
-import com.bigdata.io.WriteCache;
+import com.bigdata.io.writecache.WriteCache;
 import com.bigdata.journal.Journal;
 import com.bigdata.journal.Name2Addr;
 import com.bigdata.journal.TemporaryRawStore;
@@ -71,6 +71,7 @@
 import com.bigdata.rawstore.IBlock;
 import com.bigdata.rawstore.IRawStore;
 import com.bigdata.rawstore.WormAddressManager;
+import com.bigdata.util.ChecksumUtility;

 /**
  * Builds an {@link IndexSegment} given a source btree and a target branching
@@ -374,6 +375,32 @@
      * The bloom filter iff we build one (errorRate != 0.0).
      */
     final IBloomFilter bloomFilter;
+
+    /**
+     * When <code>true</code> record level checksums will be used in the
+     * generated file.
+     * 
+     * FIXME This can not be enabled until we factor out the direct use of the
+     * {@link WriteCache} since special handling is otherwise required to ensure
+     * that the checksum makes it into the output record when we write directly
+     * on the disk.
+     * 
+     * FIXME When enabling this, make sure that the bloom filter,
+     * {@link IndexMetadata}, and the blobs are all checksummed and make sure
+     * that the {@link IndexSegmentStore} verifies the checksums when it reads
+     * through to the disk and only returns the raw record w/o the trailing
+     * checksum.
+     * 
+     * FIXME The right time to reconcile these things may be when this branch
+     * (HAJournal) is merged with the dynamic shard refactor branch.
+     */
+    final private boolean useChecksums = false;
+
+    /**
+     * Used to compute record level checksums when {@link #useChecksums} is
+     * <code>true</code>.
+     */
+    final private ChecksumUtility checker = new ChecksumUtility();

    /**
     * The file on which the {@link IndexSegment} is written. The file is closed
@@ -1183,7 +1210,10 @@

             throw new IllegalArgumentException();

         final long begin_setup = System....

[truncated message content]
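[Editor's note: on the new BytesUtil.toArray(...) helpers in the diff above,
whether the caller sees the buffer's backing array or a private copy depends on
how the ByteBuffer was created. A minimal usage sketch under the semantics
shown in the diff; the variable names here are illustrative only.]

import java.nio.ByteBuffer;

import com.bigdata.btree.BytesUtil;

final byte[] data = new byte[] { 1, 2, 3, 4 };

// Wrapped array: backing array present, arrayOffset == 0, position == 0,
// and array length == limit, so per the code above the backing array
// itself may be returned -- writes through [shared] would show in [data].
final byte[] shared = BytesUtil.toArray(ByteBuffer.wrap(data));

// A non-zero position forces the copy path; the buffer's position, limit,
// and mark are not disturbed since a read-only view is used internally.
final ByteBuffer b = ByteBuffer.wrap(data);
b.position(1);
final byte[] copy = BytesUtil.toArray(b); // yields { 2, 3, 4 }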
From: <tho...@us...> - 2011-01-09 15:38:40
Revision: 4068
http://bigdata.svn.sourceforge.net/bigdata/?rev=4068&view=rev
Author: thompsonbry
Date: 2011-01-09 15:38:34 +0000 (Sun, 09 Jan 2011)

Log Message:
-----------
test_transferAllFrom() is hanging under OS X. I have modified the unit test to detect the OS X platform and fail immediately rather than hanging, while I get the JOURNAL_HA_BRANCH/QUADS_QUERY_BRANCH merge running.

Modified Paths:
--------------
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestFileChannelUtility.java

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestFileChannelUtility.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestFileChannelUtility.java	2011-01-09 15:37:22 UTC (rev 4067)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestFileChannelUtility.java	2011-01-09 15:38:34 UTC (rev 4068)
@@ -35,6 +35,8 @@
 import java.nio.channels.FileChannel;
 import java.util.Random;

+import org.apache.system.SystemUtil;
+
 import com.bigdata.btree.BytesUtil;
 import com.bigdata.rawstore.Bytes;

@@ -348,6 +350,13 @@
      */
     public void test_transferAllFrom() throws IOException {

+        if(SystemUtil.isOSX()) {
+            /*
+             * FIXME For some reason, this unit test is hanging under OS X.
+             */
+            fail("Unit test hangs under OS X");
+        }
+
         final File sourceFile = File.createTempFile("TestFileChannelUtility", getName());

         sourceFile.deleteOnExit();
@@ -358,7 +367,7 @@
         final RandomAccessFile source = new RandomAccessFile(sourceFile, "rw");

-        final RandomAccessFile target = new RandomAccessFile(sourceFile, "rw");
+        final RandomAccessFile target = new RandomAccessFile(targetFile, "rw");

         try {
@@ -377,6 +386,7 @@
             // write ground truth onto the file.
             FileChannelUtility.writeAll(source.getChannel(), ByteBuffer
                     .wrap(expected), 0L/* pos */);
+            target.setLength(FILE_SIZE);

             // do a bunch of trials of random transfers.
             for(int trial=0; trial<1000; trial++) {
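[Editor's note: for context, the primitive this test exercises,
java.nio.channels.FileChannel#transferFrom(...), may transfer fewer bytes than
requested in a single call, which is presumably why helpers such as
FileChannelUtility.transferAllFrom(...) loop internally (the helper's own code
is not shown in this diff). A stand-alone sketch using only JDK APIs:]

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;

static void transferAll(final RandomAccessFile source,
        final RandomAccessFile target, final long count) throws IOException {

    final FileChannel src = source.getChannel();
    final FileChannel dst = target.getChannel();

    long copied = 0;
    while (copied < count) {
        // Reads from src's current position (which it advances) and
        // writes into dst starting at offset [copied].
        final long n = dst.transferFrom(src, copied/* position */, count - copied);
        if (n == 0 && src.position() >= src.size())
            break; // source exhausted.
        copied += n;
    }
}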
From: <tho...@us...> - 2011-01-09 15:37:29
Revision: 4067
http://bigdata.svn.sourceforge.net/bigdata/?rev=4067&view=rev
Author: thompsonbry
Date: 2011-01-09 15:37:22 +0000 (Sun, 09 Jan 2011)

Log Message:
-----------
Added support for recognizing OS X to SystemUtil.

Modified Paths:
--------------
branches/QUADS_QUERY_BRANCH/bigdata/src/java/org/apache/system/SystemUtil.java

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/org/apache/system/SystemUtil.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/org/apache/system/SystemUtil.java	2011-01-07 14:15:16 UTC (rev 4066)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/org/apache/system/SystemUtil.java	2011-01-09 15:37:22 UTC (rev 4067)
@@ -40,6 +40,7 @@
     private static final String m_osVersion;
     private static final boolean m_windows;
     private static final boolean m_linux;
+    private static final boolean m_osx;

     private static final Logger log = Logger.getLogger(SystemUtil.class);

@@ -82,6 +83,7 @@
         m_cpuInfo = info;
         m_windows = SystemUtil.operatingSystem().startsWith("Windows");
         m_linux = SystemUtil.operatingSystem().startsWith("Linux");
+        m_osx = SystemUtil.operatingSystem().contains("OS X");

         if(log.isInfoEnabled()) {
             log.info("architecture: "+m_architecture);
@@ -176,5 +178,11 @@

         return m_linux;

     }
+
+    public static final boolean isOSX() {
+
+        return m_osx;
+
+    }

 }
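[Editor's note: the new predicate presumably keys off the JVM's "os.name"
system property via SystemUtil.operatingSystem(); the property lookup itself is
not shown in this diff. A stand-alone equivalent, under that assumption:]

// Equivalent check without SystemUtil; "os.name" is reported as
// "Mac OS X" on Apple JVMs of this era, so contains("OS X") matches.
final String os = System.getProperty("os.name");
final boolean isOSX = os != null && os.contains("OS X");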
From: <tho...@us...> - 2011-01-07 14:15:22
Revision: 4066
http://bigdata.svn.sourceforge.net/bigdata/?rev=4066&view=rev
Author: thompsonbry
Date: 2011-01-07 14:15:16 +0000 (Fri, 07 Jan 2011)

Log Message:
-----------
Removed some dependencies which had been introduced by Martyn for the RESTful RDF repository stuff which MikeP developed. The apache httpclient libraries were already present in the bigdata-sails/lib directory.

Modified Paths:
--------------
branches/JOURNAL_HA_BRANCH/.classpath

Removed Paths:
-------------
branches/JOURNAL_HA_BRANCH/bigdata/lib/apache/commons-httpclient-3.1/
branches/JOURNAL_HA_BRANCH/bigdata/lib/apache/commons-httpclient-3.1.jar
branches/JOURNAL_HA_BRANCH/bigdata/lib/apache/commons-httpclient-3.1.tar.gz
branches/JOURNAL_HA_BRANCH/bigdata/lib/apache/httpclient-4.0.1.jar
branches/JOURNAL_HA_BRANCH/bigdata/lib/apache/httpmime-4.0.1.jar

Modified: branches/JOURNAL_HA_BRANCH/.classpath
===================================================================
--- branches/JOURNAL_HA_BRANCH/.classpath	2011-01-06 21:02:44 UTC (rev 4065)
+++ branches/JOURNAL_HA_BRANCH/.classpath	2011-01-07 14:15:16 UTC (rev 4066)
@@ -16,11 +16,11 @@
 	<classpathentry kind="src" path="bigdata/src/samples"/>
 	<classpathentry kind="src" path="dsi-utils/src/test"/>
 	<classpathentry kind="lib" path="bigdata-jini/lib/apache/zookeeper-3.2.1.jar"/>
-	<classpathentry kind="lib" path="bigdata-sails/lib/commons-httpclient.jar"/>
 	<classpathentry kind="lib" path="bigdata-sails/lib/servlet-api.jar"/>
 	<classpathentry kind="lib" path="bigdata/lib/dsi-utils-1.0.6-020610.jar"/>
 	<classpathentry kind="lib" path="bigdata/lib/lgpl-utils-1.0.6-020610.jar"/>
 	<classpathentry kind="lib" path="bigdata-rdf/lib/nxparser-6-22-2010.jar"/>
+	<classpathentry kind="lib" path="bigdata-sails/lib/commons-httpclient.jar"/>
 	<classpathentry kind="src" path="lgpl-utils/src/java"/>
 	<classpathentry kind="src" path="lgpl-utils/src/test"/>
 	<classpathentry exported="true" kind="lib" path="bigdata/lib/icu/icu4j-3_6.jar"/>

Deleted: branches/JOURNAL_HA_BRANCH/bigdata/lib/apache/commons-httpclient-3.1.jar
===================================================================
(Binary files differ)

Deleted: branches/JOURNAL_HA_BRANCH/bigdata/lib/apache/commons-httpclient-3.1.tar.gz
===================================================================
(Binary files differ)

Deleted: branches/JOURNAL_HA_BRANCH/bigdata/lib/apache/httpclient-4.0.1.jar
===================================================================
(Binary files differ)

Deleted: branches/JOURNAL_HA_BRANCH/bigdata/lib/apache/httpmime-4.0.1.jar
===================================================================
(Binary files differ)
From: <tho...@us...> - 2011-01-06 21:02:54
Revision: 4065 http://bigdata.svn.sourceforge.net/bigdata/?rev=4065&view=rev Author: thompsonbry Date: 2011-01-06 21:02:44 +0000 (Thu, 06 Jan 2011) Log Message: ----------- Merge trunk to QUADS_QUERY_BRANCH [r3658:r4061]. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java branches/QUADS_QUERY_BRANCH/bigdata-perf/README.txt branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexRemover.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriteProc.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriter.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/StatementWriter.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/vocab/BaseVocabulary.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java branches/QUADS_QUERY_BRANCH/build.xml Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata-compatibility/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexMutation.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java Removed Paths: ------------- branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/ 
branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java Property Changed: ---------------- branches/QUADS_QUERY_BRANCH/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/attr/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/disco/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/util/config/ branches/QUADS_QUERY_BRANCH/bigdata-perf/ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/QUADS_QUERY_BRANCH/dsi-utils/LEGAL/ branches/QUADS_QUERY_BRANCH/dsi-utils/lib/ branches/QUADS_QUERY_BRANCH/dsi-utils/src/ branches/QUADS_QUERY_BRANCH/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/QUADS_QUERY_BRANCH/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/QUADS_QUERY_BRANCH/osgi/ Property changes on: branches/QUADS_QUERY_BRANCH ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 + /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3659-4061 Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java 2011-01-06 20:52:15 UTC (rev 4064) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -39,6 +39,7 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import java.io.OutputStream; +import java.util.Arrays; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -795,18 +796,34 @@ IResultHandler<ResultBitBuffer, ResultBitBuffer> { private final boolean[] results; + + /** + * I added this so I could encode information about tuple modification + * that takes more than one boolean to encode. For example, SPOs can + * be: INSERTED, REMOVED, UPDATED, NO_OP (2 bits). 
+ */ + private final int multiplier; + private final AtomicInteger onCount = new AtomicInteger(); public ResultBitBufferHandler(final int nkeys) { + + this(nkeys, 1); + + } + + public ResultBitBufferHandler(final int nkeys, final int multiplier) { - results = new boolean[nkeys]; + results = new boolean[nkeys*multiplier]; + this.multiplier = multiplier; } public void aggregate(final ResultBitBuffer result, final Split split) { - System.arraycopy(result.getResult(), 0, results, split.fromIndex, - split.ntuples); + System.arraycopy(result.getResult(), 0, results, + split.fromIndex*multiplier, + split.ntuples*multiplier); onCount.addAndGet(result.getOnCount()); Deleted: branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java =================================================================== --- trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2011-01-06 20:13:43 UTC (rev 4061) +++ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -1,276 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ -/* - * Created on Nov 19, 2010 - */ -package com.bigdata.journal; - -import java.io.File; -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -import junit.framework.TestCase2; - -import com.bigdata.Banner; -import com.bigdata.btree.IIndex; -import com.bigdata.btree.IndexMetadata; - -/** - * Test suite for binary compatibility, portability, and forward compatibility - * or automated migration of persistent stores and persistence or serialization - * capable objects across different bigdata releases. The tests in this suite - * rely on artifacts which are archived within SVN. - * - * @todo create w/ small extent and truncate (RW store does not support - * truncate). - * - * @todo test binary migration and forward compatibility. - * - * @todo stubs to create and organize artifacts,etc. - * - * @todo data driven test suite? - * - * @todo create artifact for each release, name the artifacts systematically, - * e.g., test.release.(RW|WORM).jnl or test.release.seg. Collect a list of - * the created artifacts and run each test against each of the versions of - * the artifact. - * - * @todo Force artifact file name case for file system compatibility? - * - * @todo test journal (WORM and RW), btree, index segment, row store, persistent - * data structures (checkpoints, index metadata, tuple serializers, etc.), - * RDF layer, RMI message formats, etc. - * - * @todo Specific tests for - * <p> - * Name2Addr and DefaultKeyBuilderFactory portability problem. 
See - * https://sourceforge.net/apps/trac/bigdata/ticket/193 - * <p> - * WORM global row store resolution problem introduced in the - * JOURNAL_HA_BRANCH. See - * https://sourceforge.net/apps/trac/bigdata/ticket/171#comment:5 - * <p> - * Sparse row store JDK encoding problem: - * https://sourceforge.net/apps/trac/bigdata/ticket/107 - */ -public class TestBinaryCompatibility extends TestCase2 { - - /** - * - */ - public TestBinaryCompatibility() { - } - - /** - * @param name - */ - public TestBinaryCompatibility(String name) { - super(name); - } - - /** - * @todo munge the release version into a name that is compatibility with - * the file system ("." to "_"). Store artifacts at each release? At - * each release in which an incompatibility is introduced? At each - * release in which a persistence capable data structure or change is - * introduced? - */ - static protected final File artifactDir = new File( - "bigdata-compatibility/src/resources/artifacts"); - - protected static class Version { - private final String version; - private final String revision; - public Version(String version,String revision) { - this.version = version; - this.revision = revision; - } - - /** - * The bigdata version number associated with the release. This is in - * the form <code>xx.yy.zz</code> - */ - public String getVersion() { - return version; - } - - /** - * The SVN repository revision associated with the release. This is in - * the form <code>####</code>. - */ - public String getRevision() { - return revision; - } - } - - /** - * Known release versions. - */ - protected static Version V_0_83_2 = new Version("0.83.2", "3349"); - - /** - * Tested Versions. - */ - protected Version[] versions = new Version[] { - V_0_83_2 - }; - - protected void setUp() throws Exception { - - Banner.banner(); - - super.setUp(); - - if (!artifactDir.exists()) { - - if (!artifactDir.mkdirs()) { - - throw new IOException("Could not create: " + artifactDir); - - } - - } - - for (Version version : versions) { - - final File versionDir = new File(artifactDir, version.getVersion()); - - if (!versionDir.exists()) { - - if (!versionDir.mkdirs()) { - - throw new IOException("Could not create: " + versionDir); - - } - - } - - } - - } - - protected void tearDown() throws Exception { - - super.tearDown(); - - } - - /** - * @throws Throwable - * - * @todo Each 'test' should run an instance of a class which knows how to - * create the appropriate artifacts and how to test them. - */ - public void test_WORM_compatibility_with_JOURNAL_HA_BRANCH() - throws Throwable { - - final Version version = V_0_83_2; - - final File versionDir = new File(artifactDir, version.getVersion()); - - final File artifactFile = new File(versionDir, getName() - + BufferMode.DiskWORM + Journal.Options.JNL); - - if (!artifactFile.exists()) { - - createArtifact(artifactFile); - - } - - verifyArtifact(artifactFile); - - } - - protected void createArtifact(final File artifactFile) throws Throwable { - - if (log.isInfoEnabled()) - log.info("Creating: " + artifactFile); - - final Properties properties = new Properties(); - - properties.setProperty(Journal.Options.FILE, artifactFile.toString()); - - properties.setProperty(Journal.Options.INITIAL_EXTENT, "" - + Journal.Options.minimumInitialExtent); - - final Journal journal = new Journal(properties); - - try { - - final IndexMetadata md = new IndexMetadata(UUID.randomUUID()); - - final IIndex ndx = journal.registerIndex("kb.spo.SPO", md); - - ndx.insert(1,1); - - journal.commit(); - - // reduce to minimum footprint. 
- journal.truncate(); - - } catch (Throwable t) { - - journal.destroy(); - - throw new RuntimeException(t); - - } finally { - - if (journal.isOpen()) - journal.close(); - - } - - } - - protected void verifyArtifact(final File artifactFile) throws Throwable { - - if (log.isInfoEnabled()) - log.info("Verifying: " + artifactFile); - - final Properties properties = new Properties(); - - properties.setProperty(Journal.Options.FILE, artifactFile.toString()); - - final Journal journal = new Journal(properties); - - try { - - final IIndex ndx = journal.getIndex("kb.spo.SPO"); - - assertNotNull(ndx); - - assertEquals(1,ndx.lookup(1)); - - } finally { - - journal.close(); - - } - - } - -} Copied: branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java (from rev 4061, trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java) =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -0,0 +1,276 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Nov 19, 2010 + */ +package com.bigdata.journal; + +import java.io.File; +import java.io.IOException; +import java.util.Properties; +import java.util.UUID; + +import junit.framework.TestCase2; + +import com.bigdata.Banner; +import com.bigdata.btree.IIndex; +import com.bigdata.btree.IndexMetadata; + +/** + * Test suite for binary compatibility, portability, and forward compatibility + * or automated migration of persistent stores and persistence or serialization + * capable objects across different bigdata releases. The tests in this suite + * rely on artifacts which are archived within SVN. + * + * @todo create w/ small extent and truncate (RW store does not support + * truncate). + * + * @todo test binary migration and forward compatibility. + * + * @todo stubs to create and organize artifacts,etc. + * + * @todo data driven test suite? + * + * @todo create artifact for each release, name the artifacts systematically, + * e.g., test.release.(RW|WORM).jnl or test.release.seg. Collect a list of + * the created artifacts and run each test against each of the versions of + * the artifact. + * + * @todo Force artifact file name case for file system compatibility? + * + * @todo test journal (WORM and RW), btree, index segment, row store, persistent + * data structures (checkpoints, index metadata, tuple serializers, etc.), + * RDF layer, RMI message formats, etc. 
+ * + * @todo Specific tests for + * <p> + * Name2Addr and DefaultKeyBuilderFactory portability problem. See + * https://sourceforge.net/apps/trac/bigdata/ticket/193 + * <p> + * WORM global row store resolution problem introduced in the + * JOURNAL_HA_BRANCH. See + * https://sourceforge.net/apps/trac/bigdata/ticket/171#comment:5 + * <p> + * Sparse row store JDK encoding problem: + * https://sourceforge.net/apps/trac/bigdata/ticket/107 + */ +public class TestBinaryCompatibility extends TestCase2 { + + /** + * + */ + public TestBinaryCompatibility() { + } + + /** + * @param name + */ + public TestBinaryCompatibility(String name) { + super(name); + } + + /** + * @todo munge the release version into a name that is compatibility with + * the file system ("." to "_"). Store artifacts at each release? At + * each release in which an incompatibility is introduced? At each + * release in which a persistence capable data structure or change is + * introduced? + */ + static protected final File artifactDir = new File( + "bigdata-compatibility/src/resources/artifacts"); + + protected static class Version { + private final String version; + private final String revision; + public Version(String version,String revision) { + this.version = version; + this.revision = revision; + } + + /** + * The bigdata version number associated with the release. This is in + * the form <code>xx.yy.zz</code> + */ + public String getVersion() { + return version; + } + + /** + * The SVN repository revision associated with the release. This is in + * the form <code>####</code>. + */ + public String getRevision() { + return revision; + } + } + + /** + * Known release versions. + */ + protected static Version V_0_83_2 = new Version("0.83.2", "3349"); + + /** + * Tested Versions. + */ + protected Version[] versions = new Version[] { + V_0_83_2 + }; + + protected void setUp() throws Exception { + + Banner.banner(); + + super.setUp(); + + if (!artifactDir.exists()) { + + if (!artifactDir.mkdirs()) { + + throw new IOException("Could not create: " + artifactDir); + + } + + } + + for (Version version : versions) { + + final File versionDir = new File(artifactDir, version.getVersion()); + + if (!versionDir.exists()) { + + if (!versionDir.mkdirs()) { + + throw new IOException("Could not create: " + versionDir); + + } + + } + + } + + } + + protected void tearDown() throws Exception { + + super.tearDown(); + + } + + /** + * @throws Throwable + * + * @todo Each 'test' should run an instance of a class which knows how to + * create the appropriate artifacts and how to test them. 
+ */ + public void test_WORM_compatibility_with_JOURNAL_HA_BRANCH() + throws Throwable { + + final Version version = V_0_83_2; + + final File versionDir = new File(artifactDir, version.getVersion()); + + final File artifactFile = new File(versionDir, getName() + + BufferMode.DiskWORM + Journal.Options.JNL); + + if (!artifactFile.exists()) { + + createArtifact(artifactFile); + + } + + verifyArtifact(artifactFile); + + } + + protected void createArtifact(final File artifactFile) throws Throwable { + + if (log.isInfoEnabled()) + log.info("Creating: " + artifactFile); + + final Properties properties = new Properties(); + + properties.setProperty(Journal.Options.FILE, artifactFile.toString()); + + properties.setProperty(Journal.Options.INITIAL_EXTENT, "" + + Journal.Options.minimumInitialExtent); + + final Journal journal = new Journal(properties); + + try { + + final IndexMetadata md = new IndexMetadata(UUID.randomUUID()); + + final IIndex ndx = journal.registerIndex("kb.spo.SPO", md); + + ndx.insert(1,1); + + journal.commit(); + + // reduce to minimum footprint. + journal.truncate(); + + } catch (Throwable t) { + + journal.destroy(); + + throw new RuntimeException(t); + + } finally { + + if (journal.isOpen()) + journal.close(); + + } + + } + + protected void verifyArtifact(final File artifactFile) throws Throwable { + + if (log.isInfoEnabled()) + log.info("Verifying: " + artifactFile); + + final Properties properties = new Properties(); + + properties.setProperty(Journal.Options.FILE, artifactFile.toString()); + + final Journal journal = new Journal(properties); + + try { + + final IIndex ndx = journal.getIndex("kb.spo.SPO"); + + assertNotNull(ndx); + + assertEquals(1,ndx.lookup(1)); + + } finally { + + journal.close(); + + } + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/attr ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/attr:3369-3423 + /trunk/bigdata-jini/src/java/com/bigdata/attr:3369-3423,3659-4061 Property changes on: branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/disco ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/disco:3369-3423 + /trunk/bigdata-jini/src/java/com/bigdata/disco:3369-3423,3659-4061 Property changes on: branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/util/config ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/util/config:3369-3423 + /trunk/bigdata-jini/src/java/com/bigdata/util/config:3369-3423,3659-4061 Property changes on: branches/QUADS_QUERY_BRANCH/bigdata-perf ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-perf:3369-3423 + /trunk/bigdata-perf:3369-3423,3659-4061 Modified: branches/QUADS_QUERY_BRANCH/bigdata-perf/README.txt =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-perf/README.txt 2011-01-06 20:52:15 UTC (rev 4064) +++ branches/QUADS_QUERY_BRANCH/bigdata-perf/README.txt 2011-01-06 21:02:44 UTC (rev 4065) @@ -1,2 +1,6 @@ This module contains drivers for a variety of data sets and benchmarks used as -part of a performance test suite. \ No newline at end of file +part of a performance test suite. + +Note: You must run "ant bundleJar" in the top-level directory first. 
This will +build the bigdata code base and bundle together the various dependencies so they +will be available for the ant scripts in this module. Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2011-01-06 20:13:43 UTC (rev 4061) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -1,98 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Comparator; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.spo.SPOComparator; - -public class ChangeRecord implements IChangeRecord { - - private final ISPO stmt; - - private final ChangeAction action; - -// private final StatementEnum oldType; - - public ChangeRecord(final ISPO stmt, final ChangeAction action) { - -// this(stmt, action, null); -// -// } -// -// public ChangeRecord(final BigdataStatement stmt, final ChangeAction action, -// final StatementEnum oldType) { -// - this.stmt = stmt; - this.action = action; -// this.oldType = oldType; - - } - - public ChangeAction getChangeAction() { - - return action; - - } - -// public StatementEnum getOldStatementType() { -// -// return oldType; -// -// } - - public ISPO getStatement() { - - return stmt; - - } - - @Override - public boolean equals(Object o) { - - if (o == this) - return true; - - if (o == null || o instanceof IChangeRecord == false) - return false; - - final IChangeRecord rec = (IChangeRecord) o; - - final ISPO stmt2 = rec.getStatement(); - - // statements are equal - if (stmt == stmt2 || - (stmt != null && stmt2 != null && stmt.equals(stmt2))) { - - // actions are equal - return action == rec.getChangeAction(); - - } - - return false; - - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - - sb.append(action).append(": ").append(stmt); - - return sb.toString(); - - } - - public static final Comparator<IChangeRecord> COMPARATOR = - new Comparator<IChangeRecord>() { - - public int compare(final IChangeRecord r1, final IChangeRecord r2) { - - final ISPO spo1 = r1.getStatement(); - final ISPO spo2 = r2.getStatement(); - - return SPOComparator.INSTANCE.compare(spo1, spo2); - - } - - }; - -} Copied: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java (from rev 4061, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java) =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -0,0 +1,98 @@ +package com.bigdata.rdf.changesets; + +import java.util.Comparator; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPOComparator; + +public class ChangeRecord implements IChangeRecord { + + private final ISPO stmt; + + private final ChangeAction action; + +// private final StatementEnum oldType; + + public ChangeRecord(final ISPO stmt, final ChangeAction action) { + +// this(stmt, action, null); +// +// } +// +// public ChangeRecord(final BigdataStatement stmt, final ChangeAction action, +// final StatementEnum oldType) { +// + this.stmt = stmt; + this.action = action; +// this.oldType = oldType; + + } + + public ChangeAction getChangeAction() { + + return 
action; + + } + +// public StatementEnum getOldStatementType() { +// +// return oldType; +// +// } + + public ISPO getStatement() { + + return stmt; + + } + + @Override + public boolean equals(Object o) { + + if (o == this) + return true; + + if (o == null || o instanceof IChangeRecord == false) + return false; + + final IChangeRecord rec = (IChangeRecord) o; + + final ISPO stmt2 = rec.getStatement(); + + // statements are equal + if (stmt == stmt2 || + (stmt != null && stmt2 != null && stmt.equals(stmt2))) { + + // actions are equal + return action == rec.getChangeAction(); + + } + + return false; + + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append(action).append(": ").append(stmt); + + return sb.toString(); + + } + + public static final Comparator<IChangeRecord> COMPARATOR = + new Comparator<IChangeRecord>() { + + public int compare(final IChangeRecord r1, final IChangeRecord r2) { + + final ISPO spo1 = r1.getStatement(); + final ISPO spo2 = r2.getStatement(); + + return SPOComparator.INSTANCE.compare(spo1, spo2); + + } + + }; + +} Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2011-01-06 20:13:43 UTC (rev 4061) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -1,38 +0,0 @@ -package com.bigdata.rdf.changesets; - -/** - * Provides detailed information on changes made to statements in the database. - * Change records are generated for any statements that are used in - * addStatement() or removeStatements() operations on the SAIL connection, as - * well as any inferred statements that are added or removed as a result of - * truth maintenance when the database has inference enabled. Change records - * will be sent to an instance of this class via the - * {@link #changeEvent(IChangeRecord)} method. These events will - * occur on an ongoing basis as statements are added to or removed from the - * indices. It is the change log's responsibility to collect change records. - * When the transaction is actually committed (or aborted), the change log will - * receive notification via {@link #transactionCommited()} or - * {@link #transactionAborted()}. - */ -public interface IChangeLog { - - /** - * Occurs when a statement add or remove is flushed to the indices (but - * not yet committed). - * - * @param record - * the {@link IChangeRecord} - */ - void changeEvent(final IChangeRecord record); - - /** - * Occurs when the current SAIL transaction is committed. - */ - void transactionCommited(); - - /** - * Occurs if the current SAIL transaction is aborted. - */ - void transactionAborted(); - -} Copied: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java (from rev 4061, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java) =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -0,0 +1,38 @@ +package com.bigdata.rdf.changesets; + +/** + * Provides detailed information on changes made to statements in the database. 
+ * Change records are generated for any statements that are used in + * addStatement() or removeStatements() operations on the SAIL connection, as + * well as any inferred statements that are added or removed as a result of + * truth maintenance when the database has inference enabled. Change records + * will be sent to an instance of this class via the + * {@link #changeEvent(IChangeRecord)} method. These events will + * occur on an ongoing basis as statements are added to or removed from the + * indices. It is the change log's responsibility to collect change records. + * When the transaction is actually committed (or aborted), the change log will + * receive notification via {@link #transactionCommited()} or + * {@link #transactionAborted()}. + */ +public interface IChangeLog { + + /** + * Occurs when a statement add or remove is flushed to the indices (but + * not yet committed). + * + * @param record + * the {@link IChangeRecord} + */ + void changeEvent(final IChangeRecord record); + + /** + * Occurs when the current SAIL transaction is committed. + */ + void transactionCommited(); + + /** + * Occurs if the current SAIL transaction is aborted. + */ + void transactionAborted(); + +} Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2011-01-06 20:13:43 UTC (rev 4061) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -1,120 +0,0 @@ -package com.bigdata.rdf.changesets; - -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.model.StatementEnum; -import com.bigdata.rdf.spo.ISPO; - -/** - * Provides detailed information on changes made to statements in the database. - * Change records are generated for any statements that are used in - * addStatement() or removeStatements() operations on the SAIL connection, as - * well as any inferred statements that are added or removed as a result of - * truth maintenance when the database has inference enabled. - * <p> - * See {@link IChangeLog}. - */ -public interface IChangeRecord { - - /** - * Attempting to add or remove statements can have a number of different - * effects. This enum captures the different actions that can take place as - * a result of trying to add or remove a statement from the database. - */ - public enum ChangeAction { - - /** - * The focus statement was not in the database before and will be - * in the database after the commit. This can be the result of either - * explicit addStatement() operations on the SAIL connection, or from - * new inferences being generated via truth maintenance when the - * database has inference enabled. If the focus statement has a - * statement type of explicit then it was added via an addStatement() - * operation. If the focus statement has a statement type of inferred - * then it was added via truth maintenance. - */ - INSERTED, - - /** - * The focus statement was in the database before and will not - * be in the database after the commit. When the database has inference - * and truth maintenance enabled, the statement that is the focus of - * this change record was either an explicit statement that was the - * subject of a removeStatements() operation on the connection, or it - * was an inferred statement that was removed as a result of truth - * maintenance. 
Either way, the statement is no longer provable as an - * inference using other statements still in the database after the - * commit. If it were still provable, the explicit statement would have - * had its type changed to inferred, and the inferred statement would - * have remained untouched by truth maintenance. If an inferred - * statement was the subject of a removeStatement() operation on the - * connection it would have resulted in a no-op, since inferences can - * only be removed via truth maintenance. - */ - REMOVED, - - /** - * This change action can only occur when inference and truth - * maintenance are enabled on the database. Sometimes an attempt at - * statement addition or removal via an addStatement() or - * removeStatements() operation on the connection will result in a type - * change rather than an actual assertion or deletion. When in - * inference mode, statements can have one of three statement types: - * explicit, inferred, or axiom (see {@link StatementEnum}). There are - * several reasons why a statement will change type rather than be - * asserted or deleted: - * <p> - * <ul> - * <li> A statement is asserted, but already exists in the database as - * an inference or an axiom. The existing statement will have its type - * changed from inference or axiom to explicit. </li> - * <li> An explicit statement is retracted, but is still provable by - * other means. It will have its type changed from explicit to - * inference. </li> - * <li> An explicit statement is retracted, but is one of the axioms - * needed for inference. It will have its type changed from explicit to - * axiom. </li> - * </ul> - */ - UPDATED, - -// /** -// * This change action can occur for one of two reasons: -// * <p> -// * <ul> -// * <li> A statement is asserted, but already exists in the database as -// * an explicit statement. </li> -// * <li> An inferred statement or an axiom is retracted. Only explicit -// * statements can be retracted via removeStatements() operations. </li> -// * </ul> -// */ -// NO_OP - - } - - /** - * Return the ISPO that is the focus of this change record. - * - * @return - * the {@link ISPO} - */ - ISPO getStatement(); - - /** - * Return the change action for this change record. - * - * @return - * the {@link ChangeAction} - */ - ChangeAction getChangeAction(); - -// /** -// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method -// * will return the old statement type of the focus statement. The -// * new statement type is available on the focus statement itself. -// * -// * @return -// * the old statement type of the focus statement -// */ -// StatementEnum getOldStatementType(); - -} Copied: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java (from rev 4061, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java) =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -0,0 +1,120 @@ +package com.bigdata.rdf.changesets; + +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.spo.ISPO; + +/** + * Provides detailed information on changes made to statements in the database. 
+ * Change records are generated for any statements that are used in + * addStatement() or removeStatements() operations on the SAIL connection, as + * well as any inferred statements that are added or removed as a result of + * truth maintenance when the database has inference enabled. + * <p> + * See {@link IChangeLog}. + */ +public interface IChangeRecord { + + /** + * Attempting to add or remove statements can have a number of different + * effects. This enum captures the different actions that can take place as + * a result of trying to add or remove a statement from the database. + */ + public enum ChangeAction { + + /** + * The focus statement was not in the database before and will be + * in the database after the commit. This can be the result of either + * explicit addStatement() operations on the SAIL connection, or from + * new inferences being generated via truth maintenance when the + * database has inference enabled. If the focus statement has a + * statement type of explicit then it was added via an addStatement() + * operation. If the focus statement has a statement type of inferred + * then it was added via truth maintenance. + */ + INSERTED, + + /** + * The focus statement was in the database before and will not + * be in the database after the commit. When the database has inference + * and truth maintenance enabled, the statement that is the focus of + * this change record was either an explicit statement that was the + * subject of a removeStatements() operation on the connection, or it + * was an inferred statement that was removed as a result of truth + * maintenance. Either way, the statement is no longer provable as an + * inference using other statements still in the database after the + * commit. If it were still provable, the explicit statement would have + * had its type changed to inferred, and the inferred statement would + * have remained untouched by truth maintenance. If an inferred + * statement was the subject of a removeStatement() operation on the + * connection it would have resulted in a no-op, since inferences can + * only be removed via truth maintenance. + */ + REMOVED, + + /** + * This change action can only occur when inference and truth + * maintenance are enabled on the database. Sometimes an attempt at + * statement addition or removal via an addStatement() or + * removeStatements() operation on the connection will result in a type + * change rather than an actual assertion or deletion. When in + * inference mode, statements can have one of three statement types: + * explicit, inferred, or axiom (see {@link StatementEnum}). There are + * several reasons why a statement will change type rather than be + * asserted or deleted: + * <p> + * <ul> + * <li> A statement is asserted, but already exists in the database as + * an inference or an axiom. The existing statement will have its type + * changed from inference or axiom to explicit. </li> + * <li> An explicit statement is retracted, but is still provable by + * other means. It will have its type changed from explicit to + * inference. </li> + * <li> An explicit statement is retracted, but is one of the axioms + * needed for inference. It will have its type changed from explicit to + * axiom. </li> + * </ul> + */ + UPDATED, + +// /** +// * This change action can occur for one of two reasons: +// * <p> +// * <ul> +// * <li> A statement is asserted, but already exists in the database as +// * an explicit statement. </li> +// * <li> An inferred statement or an axiom is retracted. 
Only explicit +// * statements can be retracted via removeStatements() operations. </li> +// * </ul> +// */ +// NO_OP + + } + + /** + * Return the ISPO that is the focus of this change record. + * + * @return + * the {@link ISPO} + */ + ISPO getStatement(); + + /** + * Return the change action for this change record. + * + * @return + * the {@link ChangeAction} + */ + ChangeAction getChangeAction(); + +// /** +// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method +// * will return the old statement type of the focus statement. The +// * new statement type is available on the focus statement itself. +// * +// * @return +// * the old statement type of the focus statement +// */ +// StatementEnum getOldStatementType(); + +} Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2011-01-06 20:13:43 UTC (rev 4061) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -1,163 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.Map; -import org.apache.log4j.Logger; -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.rdf.store.BigdataStatementIterator; -import com.bigdata.striterator.ChunkedArrayIterator; - -/** - * This is a very simple implementation of a change log. NOTE: This is not - * a particularly great implementation. First of all it ends up storing - * two copies of the change set. Secondly it needs to be smarter about - * concurrency, or maybe we can be smart about it when we do the - * implementation on the other side (the SAIL connection can just write - * change events to a buffer and then the buffer can be drained by - * another thread that doesn't block the actual read/write operations, - * although then we need to be careful not to issue the committed() - * notification before the buffer is drained). - * - * @author mike - * - */ -public class InMemChangeLog implements IChangeLog { - - protected static final Logger log = Logger.getLogger(InMemChangeLog.class); - - /** - * Running tally of new changes since the last commit notification. - */ - private final Map<ISPO,IChangeRecord> changeSet = - new HashMap<ISPO, IChangeRecord>(); - - /** - * Keep a record of the change set as of the last commit. - */ - private final Map<ISPO,IChangeRecord> committed = - new HashMap<ISPO, IChangeRecord>(); - - /** - * See {@link IChangeLog#changeEvent(IChangeRecord)}. - */ - public synchronized void changeEvent(final IChangeRecord record) { - - if (log.isInfoEnabled()) - log.info(record); - - changeSet.put(record.getStatement(), record); - - } - - /** - * See {@link IChangeLog#transactionCommited()}. - */ - public synchronized void transactionCommited() { - - if (log.isInfoEnabled()) - log.info("transaction committed"); - - committed.clear(); - - committed.putAll(changeSet); - - changeSet.clear(); - - } - - /** - * See {@link IChangeLog#transactionAborted()}. - */ - public synchronized void transactionAborted() { - - if (log.isInfoEnabled()) - log.info("transaction aborted"); - - changeSet.clear(); - - } - - /** - * Return the change set as of the last commmit point. 
- * - * @return - * a collection of {@link IChangeRecord}s as of the last commit - * point - */ - public Collection<IChangeRecord> getLastCommit() { - - return committed.values(); - - } - - /** - * Return the change set as of the last commmit point, using the supplied - * database to resolve ISPOs to BigdataStatements. - * - * @return - * a collection of {@link IChangeRecord}s as of the last commit - * point - */ - public Collection<IChangeRecord> getLastCommit(final AbstractTripleStore db) { - - return resolve(db, committed.values()); - - } - - /** - * Use the supplied database to turn a set of ISPO change records into - * BigdataStatement change records. BigdataStatements also implement - * ISPO, the difference being that BigdataStatements also contain - * materialized RDF terms for the 3 (or 4) positions, in addition to just - * the internal identifiers (IVs) for those terms. - * - * @param db - * the database containing the lexicon needed to materialize - * the BigdataStatement objects - * @param unresolved - * the ISPO change records that came from IChangeLog notification - * events - * @return - * the fully resolves BigdataStatement change records - */ - private Collection<IChangeRecord> resolve(final AbstractTripleStore db, - final Collection<IChangeRecord> unresolved) { - - final Collection<IChangeRecord> resolved = - new LinkedList<IChangeRecord>(); - - // collect up the ISPOs out of the unresolved change records - final ISPO[] spos = new ISPO[unresolved.size()]; - int i = 0; - for (IChangeRecord rec : unresolved) { - spos[i++] = rec.getStatement(); - } - - // use the database to resolve them into BigdataStatements - final BigdataStatementIterator it = - db.asStatementIterator( - new ChunkedArrayIterator<ISPO>(i, spos, null/* keyOrder */)); - - /* - * the BigdataStatementIterator will produce BigdataStatement objects - * in the same order as the original ISPO array - */ - for (IChangeRecord rec : unresolved) { - - final BigdataStatement stmt = it.next(); - - resolved.add(new ChangeRecord(stmt, rec.getChangeAction())); - - } - - return resolved; - - } - - - -} Copied: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java (from rev 4061, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java) =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -0,0 +1,163 @@ +package com.bigdata.rdf.changesets; + +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; +import org.apache.log4j.Logger; +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BigdataStatementIterator; +import com.bigdata.striterator.ChunkedArrayIterator; + +/** + * This is a very simple implementation of a change log. NOTE: This is not + * a particularly great implementation. First of all it ends up storing + * two copies of the change set. 
Secondly it needs to be smarter about + * concurrency, or maybe we can be smart about it when we do the + * implementation on the other side (the SAIL connection can just write + * change events to a buffer and then the buffer can be drained by + * another thread that doesn't block the actual read/write operations, + * although then we need to be careful not to issue the committed() + * notification before the buffer is drained). + * + * @author mike + * + */ +public class InMemChangeLog implements IChangeLog { + + protected static final Logger log = Logger.getLogger(InMemChangeLog.class); + + /** + * Running tally of new changes since the last commit notification. + */ + private final Map<ISPO,IChangeRecord> changeSet = + new HashMap<ISPO, IChangeRecord>(); + + /** + * Keep a record of the change set as of the last commit. + */ + private final Map<ISPO,IChangeRecord> committed = + new HashMap<ISPO, IChangeRecord>(); + + /** + * See {@link IChangeLog#changeEvent(IChangeRecord)}. + */ + public synchronized void changeEvent(final IChangeRecord record) { + + if (log.isInfoEnabled()) + log.info(record); + + changeSet.put(record.getStatement(), record); + + } + + /** + * See {@link IChangeLog#transactionCommited()}. + */ + public synchronized void transactionCommited() { + + if (log.isInfoEnabled()) + log.info("transaction committed"); + + committed.clear(); + + committed.putAll(changeSet); + + changeSet.clear(); + + } + + /** + * See {@link IChangeLog#transactionAborted()}. + */ + public synchronized void transactionAborted() { + + if (log.isInfoEnabled()) + log.info("transaction aborted"); + + changeSet.clear(); + + } + + /** + * Return the change set as of the last commmit point. + * + * @return + * a collection of {@link IChangeRecord}s as of the last commit + * point + */ + public Collection<IChangeRecord> getLastCommit() { + + return committed.values(); + + } + + /** + * Return the change set as of the last commmit point, using the supplied + * database to resolve ISPOs to BigdataStatements. + * + * @return + * a collection of {@link IChangeRecord}s as of the last commit + * point + */ + public Collection<IChangeRecord> getLastCommit(final AbstractTripleStore db) { + + return resolve(db, committed.values()); + + } + + /** + * Use the supplied database to turn a set of ISPO change records into + * BigdataStatement change records. BigdataStatements also implement + * ISPO, the difference being that BigdataStatements also contain + * materialized RDF terms for the 3 (or 4) positions, in addition to just + * the internal identifiers (IVs) for those terms. 
+ * + * @param db + * the database containing the lexicon needed to materialize + * the BigdataStatement objects + * @param unresolved + * the ISPO change records that came from IChangeLog notification + * events + * @return + * the fully resolves BigdataStatement change records + */ + private Collection<IChangeRecord> resolve(final AbstractTripleStore db, + final Collection<IChangeRecord> unresolved) { + + final Collection<IChangeRecord> resolved = + new LinkedList<IChangeRecord>(); + + // collect up the ISPOs out of the unresolved change records + final ISPO[] spos = new ISPO[unresolved.size()]; + int i = 0; + for (IChangeRecord rec : unresolved) { + spos[i++] = rec.getStatement(); + } + + // use the database to resolve them into BigdataStatements + final BigdataStatementIterator it = + db.asStatementIterator( + new ChunkedArrayIterator<ISPO>(i, spos, null/* keyOrder */)); + + /* + * the BigdataStatementIterator will produce BigdataStatement objects + * in the same order as the original ISPO array + */ + for (IChangeRecord rec : unresolved) { + + final BigdataStatement stmt = it.next(); + + resolved.add(new ChangeRecord(stmt, rec.getChangeAction())); + + } + + return resolved; + + } + + + +} Deleted: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 2011-01-06 20:13:43 UTC (rev 4061) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 2011-01-06 21:02:44 UTC (rev 4065) @@ -1,208 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Iterator; -import java.util.Map; -import org.apache.log4j.Logger; -import com.bigdata.rdf.changesets.IChangeRecord.ChangeAction; -import com.bigdata.rdf.internal.IV; -import com.bigdata.rdf.model.BigdataBNode; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.spo.SPO; -import com.bigdata.rdf.spo.ISPO.ModifiedEnum; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.relation.accesspath.IElementFilter; -import com.bigdata.striterator.ChunkedArrayIterator; -import com.bigdata.striterator.IChunkedOrderedIterator; - -public class StatementWriter { - - protected static final Logger log = Logger.getLogger(StatementWriter.class); - - public static long addStatements(final AbstractTripleStore database, - final AbstractTripleStore statementStore, - final boolean copyOnly, - final IElementFilter<ISPO> filter, - final IChunkedOrderedIterator<ISPO> itr, - final IChangeLog changeLog) { - - long n = 0; - - if (itr.hasNext()) { - -// final BigdataStatementIteratorImpl itr2 = -// new BigdataStatementIteratorImpl(database, bnodes, itr) -// .start(database.getExecutorService()); -// -// final BigdataStatement[] stmts = -// new BigdataStatement[database.getChunkCapacity()]; - final SPO[] stmts = new SPO[database.getChunkCapacity()]; - - int i = 0; - while ((i = nextChunk(itr, stmts)) > 0) { - n += addStatements(database, statementStore, copyOnly, filter, - stmts, i, changeLog); - } - - } - - return n; - - } - - private static long addStatements(final AbstractTripleStore database, - final AbstractTripleStore statementStore, - final boolean copyOnly, - final IElementFilter<ISPO> filter, - final ISPO[] stmts, - final int numStmts, - final IChangeLog changeLog) { - -// final SPO[] tmp = allocateSPOs(stmts, numStmts); - - final long n = database.addStatements(statementStore, copyOnly, - new 
ChunkedArrayIterator<ISPO>(numStmts, stmts, - null/* keyOrder */), filter); - - // Copy the state of the isModified() flag and notify changeLog - for (int i = 0; i < numStmts; i++) { - - if (stmts[i].isModified()) { - -// stmts[i].setModified(true); - - if (changeLog != null) { - - switch(stmts[i].getModified()) { - case INSERTED: - changeLog.cha... [truncated message content] |
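Two details in the r4065 merge above are worth unpacking. First, the multiplier added to ResultBitBufferHandler widens the boolean[] so that each tuple can carry more than one bit of state; the javadoc comment cites the SPO modification actions INSERTED, REMOVED, UPDATED, NO_OP, which need two bits. A sketch of one way such a two-bit-per-tuple layout could work (the Mod enum, helper methods, and ordinal ordering below are hypothetical illustrations, not the project's actual encoding):

public class TwoBitBufferDemo {

    // Hypothetical action codes; the ordering here is illustrative only.
    enum Mod { NO_OP, INSERTED, REMOVED, UPDATED }

    // Two booleans per tuple: slot 2*i holds the low bit, 2*i+1 the high bit.
    static void set(final boolean[] results, final int i, final Mod m) {
        final int ord = m.ordinal(); // 0..3, fits in two bits.
        results[i * 2] = (ord & 1) != 0;
        results[i * 2 + 1] = (ord & 2) != 0;
    }

    static Mod get(final boolean[] results, final int i) {
        final int ord = (results[i * 2] ? 1 : 0)
                | (results[i * 2 + 1] ? 2 : 0);
        return Mod.values()[ord];
    }

    public static void main(final String[] args) {
        final int nkeys = 3, multiplier = 2; // as in the two-argument ctor.
        final boolean[] results = new boolean[nkeys * multiplier];
        set(results, 0, Mod.INSERTED);
        set(results, 1, Mod.UPDATED);
        // Prints: INSERTED, UPDATED, NO_OP
        System.out.println(get(results, 0) + ", " + get(results, 1) + ", "
                + get(results, 2));
    }

}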
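Second, the IChangeLog contract merged in above is small: changeEvent(IChangeRecord) fires each time a statement add or remove is flushed to the indices, and transactionCommited() / transactionAborted() report the outcome of the SAIL transaction. As a usage sketch, a minimal listener that only tallies committed change events could look like the following (this class is hypothetical and assumes nothing beyond the three interface methods shown in the diff):

import com.bigdata.rdf.changesets.IChangeLog;
import com.bigdata.rdf.changesets.IChangeRecord;

public class CountingChangeLog implements IChangeLog {

    private long pending = 0;
    private long committed = 0;

    public synchronized void changeEvent(final IChangeRecord record) {
        // Called as adds/removes are flushed to the indices (not yet durable).
        pending++;
    }

    public synchronized void transactionCommited() {
        committed += pending;
        pending = 0;
    }

    public synchronized void transactionAborted() {
        pending = 0;
    }

    public synchronized long getCommittedCount() {
        return committed;
    }

}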
From: <tho...@us...> - 2011-01-06 20:52:21
Revision: 4064 http://bigdata.svn.sourceforge.net/bigdata/?rev=4064&view=rev Author: thompsonbry Date: 2011-01-06 20:52:15 +0000 (Thu, 06 Jan 2011) Log Message: ----------- Removed @Override tags. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTableMetadata.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashBucket.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashDirectory.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTableMetadata.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTableMetadata.java 2011-01-06 20:51:03 UTC (rev 4063) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTableMetadata.java 2011-01-06 20:52:15 UTC (rev 4064) @@ -90,14 +90,12 @@ } - @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { // TODO Auto-generated method stub throw new UnsupportedOperationException(); } - @Override public void writeExternal(ObjectOutput out) throws IOException { // TODO Auto-generated method stub throw new UnsupportedOperationException(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashBucket.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashBucket.java 2011-01-06 20:51:03 UTC (rev 4063) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashBucket.java 2011-01-06 20:52:15 UTC (rev 4064) @@ -473,24 +473,20 @@ } - @Override public boolean hasNext() { return current < size; } - @Override public Integer next() { return entries[current++]; } - @Override public void remove() { throw new UnsupportedOperationException(); } } - @Override public void delete() throws IllegalStateException { // TODO Auto-generated method stub throw new UnsupportedOperationException(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashDirectory.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashDirectory.java 2011-01-06 20:51:03 UTC (rev 4063) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashDirectory.java 2011-01-06 20:52:15 UTC (rev 4064) @@ -513,7 +513,6 @@ } - @Override public void delete() throws IllegalStateException { // TODO Auto-generated method stub throw new UnsupportedOperationException(); @@ -616,7 +615,6 @@ } - @Override public HashBucket next() { if (!hasNext()) @@ -630,7 +628,6 @@ } - @Override public void remove() { throw new UnsupportedOperationException(); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
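The tags removed here (and in r4063 and r4062 below) all sit on methods that implement interface methods, such as Externalizable.readExternal() and Iterator.next(), rather than on true superclass overrides. The log message gives no reason, but one plausible motivation, stated here as an assumption, is source compatibility with Java 5: under javac -source 1.5, @Override is only permitted on a method that overrides a superclass method, and placing it on an interface-method implementation is a compile error (Java 6 relaxed this). A hypothetical illustration:

public class OverrideDemo {

    interface Data {
        void delete();
    }

    static class MutableData implements Data {
        @Override // rejected by javac -source 1.5; legal from Java 6 on.
        public void delete() {
            throw new UnsupportedOperationException();
        }
    }

    public static void main(final String[] args) {
        final Data d = new MutableData();
        System.out.println(d instanceof MutableData); // prints: true
    }

}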
From: <tho...@us...> - 2011-01-06 20:51:09
|
Revision: 4063 http://bigdata.svn.sourceforge.net/bigdata/?rev=4063&view=rev Author: thompsonbry Date: 2011-01-06 20:51:03 +0000 (Thu, 06 Jan 2011) Log Message: ----------- Removed @Override tags. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/MutableBucketData.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/MutableBucketData.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/MutableBucketData.java 2011-01-06 20:49:27 UTC (rev 4062) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/MutableBucketData.java 2011-01-06 20:51:03 UTC (rev 4063) @@ -175,127 +175,106 @@ } - @Override public int getHash(int index) { // TODO Auto-generated method stub return 0; } - @Override public int getKeyCount() { // TODO Auto-generated method stub return 0; } - @Override public int getLengthMSB() { // TODO Auto-generated method stub return 0; } - @Override public Iterator<Integer> hashIterator(int h) { // TODO Auto-generated method stub return null; } - @Override public boolean getDeleteMarker(int index) { // TODO Auto-generated method stub return false; } - @Override public long getNextAddr() { // TODO Auto-generated method stub return 0; } - @Override public long getPriorAddr() { // TODO Auto-generated method stub return 0; } - @Override public int getValueCount() { // TODO Auto-generated method stub return 0; } - @Override public IRaba getValues() { // TODO Auto-generated method stub return null; } - @Override public long getVersionTimestamp(int index) { // TODO Auto-generated method stub return 0; } - @Override public boolean hasDeleteMarkers() { // TODO Auto-generated method stub return false; } - @Override public boolean hasVersionTimestamps() { // TODO Auto-generated method stub return false; } - @Override public boolean isDoubleLinked() { // TODO Auto-generated method stub return false; } - @Override public AbstractFixedByteArrayBuffer data() { // TODO Auto-generated method stub return null; } - @Override public IRaba getKeys() { // TODO Auto-generated method stub return null; } - @Override public long getMaximumVersionTimestamp() { // TODO Auto-generated method stub return 0; } - @Override public long getMinimumVersionTimestamp() { // TODO Auto-generated method stub return 0; } - @Override public int getSpannedTupleCount() { // TODO Auto-generated method stub return 0; } - @Override public boolean isCoded() { // TODO Auto-generated method stub return false; } - @Override public boolean isLeaf() { // TODO Auto-generated method stub return false; } - @Override public boolean isReadOnly() { // TODO Auto-generated method stub return false; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-01-06 20:49:33
|
Revision: 4062 http://bigdata.svn.sourceforge.net/bigdata/?rev=4062&view=rev Author: thompsonbry Date: 2011-01-06 20:49:27 +0000 (Thu, 06 Jan 2011) Log Message: ----------- Removed @Override tags. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashTree.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashTree.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashTree.java 2011-01-06 20:13:43 UTC (rev 4061) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HashTree.java 2011-01-06 20:49:27 UTC (rev 4062) @@ -672,25 +672,21 @@ * ISimpleBTree */ - @Override public boolean contains(byte[] key) { // TODO Auto-generated method stub return false; } - @Override public byte[] insert(byte[] key, byte[] value) { // TODO Auto-generated method stub return null; } - @Override public byte[] lookup(byte[] key) { // TODO Auto-generated method stub return null; } - @Override public byte[] remove(byte[] key) { // TODO Auto-generated method stub return null; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-01-06 20:13:53
|
Revision: 4061 http://bigdata.svn.sourceforge.net/bigdata/?rev=4061&view=rev Author: thompsonbry Date: 2011-01-06 20:13:43 +0000 (Thu, 06 Jan 2011) Log Message: ----------- Merge trunk to JOURNAL_HA_BRANCH [r3895:HEAD]. This merge brings in the change set API for the SAIL. Unit test failures in the HA branch at this time include: - TestChangeSets throws UnsupportedOperationException when running with quads. - TestNamedGraphs#testSearchQuery() fails with TestBigdataSailWithQuads (but not in the trunk). - TestBigdataSailEvaluationStrategy#test_free_text_search() fails with TestBigdataSailWithQuads, TestBigdataSailWithSids, and TestBigdataSailWithoutSids. These failures do not exist in the trunk. - BigdataSparqlTest#dataset-01, 03, 05, 06, 07, 08, 11, 12b sail with TestBigdataSailWithQuads (these test failures exist in the trunk as well). The text search related test errors were likely introduced in the JOURNAL_HA_BRANCH with some recent extensions to the SAIL free text search API. MikeP will look into these errors. Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/JOURNAL_HA_BRANCH/bigdata/src/resources/logging/log4j.properties branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexRemover.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriteProc.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriter.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/StatementWriter.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java Added Paths: ----------- branches/JOURNAL_HA_BRANCH/bigdata-compatibility/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexMutation.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/JOURNAL_HA_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java Removed Paths: ------------- branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java Property Changed: ---------------- branches/JOURNAL_HA_BRANCH/ branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/attr/ branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/disco/ branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/util/config/ branches/JOURNAL_HA_BRANCH/bigdata-perf/ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/JOURNAL_HA_BRANCH/dsi-utils/src/java/it/ branches/JOURNAL_HA_BRANCH/dsi-utils/src/test/it/unimi/ branches/JOURNAL_HA_BRANCH/osgi/ Property changes on: branches/JOURNAL_HA_BRANCH ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/bugfix-btm:2594-2779 /trunk:2763-2785,2918-2980,3392-3437,3656-3894 + /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/bugfix-btm:2594-2779 /trunk:2763-2785,2918-2980,3392-3437,3656-3894,3896-4059 Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java 2011-01-06 19:38:57 UTC (rev 4060) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -39,6 +39,7 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import java.io.OutputStream; +import java.util.Arrays; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -795,18 +796,34 @@ IResultHandler<ResultBitBuffer, ResultBitBuffer> { private final boolean[] results; + + /** + * I added this so I could encode information about tuple modification + * that takes more than one boolean to encode. 
For example, SPOs can + * be: INSERTED, REMOVED, UPDATED, NO_OP (2 bits). + */ + private final int multiplier; + private final AtomicInteger onCount = new AtomicInteger(); public ResultBitBufferHandler(final int nkeys) { + + this(nkeys, 1); + + } + + public ResultBitBufferHandler(final int nkeys, final int multiplier) { - results = new boolean[nkeys]; + results = new boolean[nkeys*multiplier]; + this.multiplier = multiplier; } public void aggregate(final ResultBitBuffer result, final Split split) { - System.arraycopy(result.getResult(), 0, results, split.fromIndex, - split.ntuples); + System.arraycopy(result.getResult(), 0, results, + split.fromIndex*multiplier, + split.ntuples*multiplier); onCount.addAndGet(result.getOnCount()); Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2011-01-06 19:38:57 UTC (rev 4060) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -88,22 +88,10 @@ import com.bigdata.relation.locator.IResourceLocator; import com.bigdata.resources.ResourceManager; import com.bigdata.rwstore.IAllocationContext; -import com.bigdata.service.DataService; -import com.bigdata.service.EmbeddedClient; -import com.bigdata.service.IBigdataClient; -import com.bigdata.service.IBigdataFederation; -import com.bigdata.service.jini.JiniClient; import com.bigdata.util.ChecksumUtility; /** * <p> -<<<<<<< .working - * The journal is an append-only persistence capable data structure supporting - * atomic commit, named indices, and transactions. Writes are logically appended - * to the journal to minimize disk head movement. - * </p> - * <p> -======= * The journal is a persistence capable data structure supporting atomic commit, * named indices, and full transactions. The {@link BufferMode#DiskRW} mode * provides an persistence scheme based on reusable allocation slots while the @@ -111,50 +99,13 @@ * Journals may be configured in highly available quorums. * </p> * <p> ->>>>>>> .merge-right.r3391 * This class is an abstract implementation of the {@link IJournal} interface * that does not implement the {@link IConcurrencyManager}, -<<<<<<< .working - * {@link IResourceManager}, or {@link ITransactionService} interfaces. There - * are several classes which DO support all of these features, relying on the - * {@link AbstractJournal} for their underlying persistence store. These - * include: - * <dl> - * <dt>{@link Journal}</dt> - * <dd>A concrete implementation that may be used for a standalone immortal - * database complete with concurrency control and transaction management.</dd> - * <dt>{@link DataService}</dt> - * <dd>A class supporting remote clients, key-range partitioned indices, - * concurrency, and scale-out.</dd> - * <dt>{@link IBigdataClient}</dt> - * <dd>Clients connect to an {@link IBigdataFederation}, which is the basis for - * the scale-out architecture. 
There are several variants of a federation - * available, including: - * <dl> - * <dt>{@link LocalDataServiceClient}</dt> - * <dd>Purely local operations against a {@link DataService} with full - * concurrency controls and transaction management</dd> - * <dt>{@link EmbeddedClient}</dt> - * <dd>Operations against a collection of services running in the same JVM with - * full concurrency controls, transaction management, and key-range partitioned - * indices.</dd> - * <dt>{@link JiniClient}</dt> - * <dd>Operations against a collection of services running on a distributed - * services framework such as Jini with full concurrency controls, transaction - * management, and key-range partitioned indices. This is the scale-out - * solution.</dd> - * </dl> - * </dd> - * </dl> - * </p> - * <h2>Limitations</h2> -======= * {@link IResourceManager}, or {@link ITransactionService} interfaces. The * {@link Journal} provides a concrete implementation that may be used for a * standalone database complete with concurrency control and transaction * management. * </p> <h2>Limitations</h2> ->>>>>>> .merge-right.r3391 * <p> * The {@link IIndexStore} implementation on this class is NOT thread-safe. The * basic limitation is that the mutable {@link BTree} is NOT thread-safe. The Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/resources/logging/log4j.properties =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/resources/logging/log4j.properties 2011-01-06 19:38:57 UTC (rev 4060) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/resources/logging/log4j.properties 2011-01-06 20:13:43 UTC (rev 4061) @@ -56,6 +56,7 @@ #log4j.logger.com.bigdata.io.WriteCacheService=TRACE #log4j.logger.com.bigdata.journal.AbstractBufferStrategy=TRACE #log4j.logger.com.bigdata.resources=INFO +#log4j.logger.com.bigdata.rwstore.RWStore=TRACE log4j.logger.com.bigdata.journal.ha.HAServer=ALL log4j.logger.com.bigdata.journal.ha.HAConnect=ALL log4j.logger.com.bigdata.journal.ha.SocketMessage=ALL @@ -64,7 +65,7 @@ #log4j.logger.com.bigdata.journal.Name2Addr=INFO #log4j.logger.com.bigdata.journal.AbstractTask=INFO #log4j.logger.com.bigdata.journal.WriteExecutorService=INFO -#log4j.logger.com.bigdata.service.AbstractTransactionService=INFO +#log4j.logger.com.bigdata.service.AbstractTransactionService=TRACE #log4j.logger.com.bigdata.journal.AbstractLocalTransactionManager=INFO log4j.logger.com.bigdata.concurrent.TxDag=WARN log4j.logger.com.bigdata.concurrent.NonBlockingLockManager=WARN Deleted: branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java =================================================================== --- trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2011-01-05 22:42:01 UTC (rev 4059) +++ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -1,276 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ -/* - * Created on Nov 19, 2010 - */ -package com.bigdata.journal; - -import java.io.File; -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -import junit.framework.TestCase2; - -import com.bigdata.Banner; -import com.bigdata.btree.IIndex; -import com.bigdata.btree.IndexMetadata; - -/** - * Test suite for binary compatibility, portability, and forward compatibility - * or automated migration of persistent stores and persistence or serialization - * capable objects across different bigdata releases. The tests in this suite - * rely on artifacts which are archived within SVN. - * - * @todo create w/ small extent and truncate (RW store does not support - * truncate). - * - * @todo test binary migration and forward compatibility. - * - * @todo stubs to create and organize artifacts,etc. - * - * @todo data driven test suite? - * - * @todo create artifact for each release, name the artifacts systematically, - * e.g., test.release.(RW|WORM).jnl or test.release.seg. Collect a list of - * the created artifacts and run each test against each of the versions of - * the artifact. - * - * @todo Force artifact file name case for file system compatibility? - * - * @todo test journal (WORM and RW), btree, index segment, row store, persistent - * data structures (checkpoints, index metadata, tuple serializers, etc.), - * RDF layer, RMI message formats, etc. - * - * @todo Specific tests for - * <p> - * Name2Addr and DefaultKeyBuilderFactory portability problem. See - * https://sourceforge.net/apps/trac/bigdata/ticket/193 - * <p> - * WORM global row store resolution problem introduced in the - * JOURNAL_HA_BRANCH. See - * https://sourceforge.net/apps/trac/bigdata/ticket/171#comment:5 - * <p> - * Sparse row store JDK encoding problem: - * https://sourceforge.net/apps/trac/bigdata/ticket/107 - */ -public class TestBinaryCompatibility extends TestCase2 { - - /** - * - */ - public TestBinaryCompatibility() { - } - - /** - * @param name - */ - public TestBinaryCompatibility(String name) { - super(name); - } - - /** - * @todo munge the release version into a name that is compatibility with - * the file system ("." to "_"). Store artifacts at each release? At - * each release in which an incompatibility is introduced? At each - * release in which a persistence capable data structure or change is - * introduced? - */ - static protected final File artifactDir = new File( - "bigdata-compatibility/src/resources/artifacts"); - - protected static class Version { - private final String version; - private final String revision; - public Version(String version,String revision) { - this.version = version; - this.revision = revision; - } - - /** - * The bigdata version number associated with the release. This is in - * the form <code>xx.yy.zz</code> - */ - public String getVersion() { - return version; - } - - /** - * The SVN repository revision associated with the release. This is in - * the form <code>####</code>. - */ - public String getRevision() { - return revision; - } - } - - /** - * Known release versions. - */ - protected static Version V_0_83_2 = new Version("0.83.2", "3349"); - - /** - * Tested Versions. 
- */ - protected Version[] versions = new Version[] { - V_0_83_2 - }; - - protected void setUp() throws Exception { - - Banner.banner(); - - super.setUp(); - - if (!artifactDir.exists()) { - - if (!artifactDir.mkdirs()) { - - throw new IOException("Could not create: " + artifactDir); - - } - - } - - for (Version version : versions) { - - final File versionDir = new File(artifactDir, version.getVersion()); - - if (!versionDir.exists()) { - - if (!versionDir.mkdirs()) { - - throw new IOException("Could not create: " + versionDir); - - } - - } - - } - - } - - protected void tearDown() throws Exception { - - super.tearDown(); - - } - - /** - * @throws Throwable - * - * @todo Each 'test' should run an instance of a class which knows how to - * create the appropriate artifacts and how to test them. - */ - public void test_WORM_compatibility_with_JOURNAL_HA_BRANCH() - throws Throwable { - - final Version version = V_0_83_2; - - final File versionDir = new File(artifactDir, version.getVersion()); - - final File artifactFile = new File(versionDir, getName() - + BufferMode.DiskWORM + Journal.Options.JNL); - - if (!artifactFile.exists()) { - - createArtifact(artifactFile); - - } - - verifyArtifact(artifactFile); - - } - - protected void createArtifact(final File artifactFile) throws Throwable { - - if (log.isInfoEnabled()) - log.info("Creating: " + artifactFile); - - final Properties properties = new Properties(); - - properties.setProperty(Journal.Options.FILE, artifactFile.toString()); - - properties.setProperty(Journal.Options.INITIAL_EXTENT, "" - + Journal.Options.minimumInitialExtent); - - final Journal journal = new Journal(properties); - - try { - - final IndexMetadata md = new IndexMetadata(UUID.randomUUID()); - - final IIndex ndx = journal.registerIndex("kb.spo.SPO", md); - - ndx.insert(1,1); - - journal.commit(); - - // reduce to minimum footprint. - journal.truncate(); - - } catch (Throwable t) { - - journal.destroy(); - - throw new RuntimeException(t); - - } finally { - - if (journal.isOpen()) - journal.close(); - - } - - } - - protected void verifyArtifact(final File artifactFile) throws Throwable { - - if (log.isInfoEnabled()) - log.info("Verifying: " + artifactFile); - - final Properties properties = new Properties(); - - properties.setProperty(Journal.Options.FILE, artifactFile.toString()); - - final Journal journal = new Journal(properties); - - try { - - final IIndex ndx = journal.getIndex("kb.spo.SPO"); - - assertNotNull(ndx); - - assertEquals(1,ndx.lookup(1)); - - } finally { - - journal.close(); - - } - - } - -} Copied: branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java (from rev 4059, trunk/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata-compatibility/src/test/com/bigdata/journal/TestBinaryCompatibility.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -0,0 +1,276 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Nov 19, 2010 + */ +package com.bigdata.journal; + +import java.io.File; +import java.io.IOException; +import java.util.Properties; +import java.util.UUID; + +import junit.framework.TestCase2; + +import com.bigdata.Banner; +import com.bigdata.btree.IIndex; +import com.bigdata.btree.IndexMetadata; + +/** + * Test suite for binary compatibility, portability, and forward compatibility + * or automated migration of persistent stores and persistence or serialization + * capable objects across different bigdata releases. The tests in this suite + * rely on artifacts which are archived within SVN. + * + * @todo create w/ small extent and truncate (RW store does not support + * truncate). + * + * @todo test binary migration and forward compatibility. + * + * @todo stubs to create and organize artifacts,etc. + * + * @todo data driven test suite? + * + * @todo create artifact for each release, name the artifacts systematically, + * e.g., test.release.(RW|WORM).jnl or test.release.seg. Collect a list of + * the created artifacts and run each test against each of the versions of + * the artifact. + * + * @todo Force artifact file name case for file system compatibility? + * + * @todo test journal (WORM and RW), btree, index segment, row store, persistent + * data structures (checkpoints, index metadata, tuple serializers, etc.), + * RDF layer, RMI message formats, etc. + * + * @todo Specific tests for + * <p> + * Name2Addr and DefaultKeyBuilderFactory portability problem. See + * https://sourceforge.net/apps/trac/bigdata/ticket/193 + * <p> + * WORM global row store resolution problem introduced in the + * JOURNAL_HA_BRANCH. See + * https://sourceforge.net/apps/trac/bigdata/ticket/171#comment:5 + * <p> + * Sparse row store JDK encoding problem: + * https://sourceforge.net/apps/trac/bigdata/ticket/107 + */ +public class TestBinaryCompatibility extends TestCase2 { + + /** + * + */ + public TestBinaryCompatibility() { + } + + /** + * @param name + */ + public TestBinaryCompatibility(String name) { + super(name); + } + + /** + * @todo munge the release version into a name that is compatibility with + * the file system ("." to "_"). Store artifacts at each release? At + * each release in which an incompatibility is introduced? At each + * release in which a persistence capable data structure or change is + * introduced? + */ + static protected final File artifactDir = new File( + "bigdata-compatibility/src/resources/artifacts"); + + protected static class Version { + private final String version; + private final String revision; + public Version(String version,String revision) { + this.version = version; + this.revision = revision; + } + + /** + * The bigdata version number associated with the release. This is in + * the form <code>xx.yy.zz</code> + */ + public String getVersion() { + return version; + } + + /** + * The SVN repository revision associated with the release. This is in + * the form <code>####</code>. + */ + public String getRevision() { + return revision; + } + } + + /** + * Known release versions. 
+ */ + protected static Version V_0_83_2 = new Version("0.83.2", "3349"); + + /** + * Tested Versions. + */ + protected Version[] versions = new Version[] { + V_0_83_2 + }; + + protected void setUp() throws Exception { + + Banner.banner(); + + super.setUp(); + + if (!artifactDir.exists()) { + + if (!artifactDir.mkdirs()) { + + throw new IOException("Could not create: " + artifactDir); + + } + + } + + for (Version version : versions) { + + final File versionDir = new File(artifactDir, version.getVersion()); + + if (!versionDir.exists()) { + + if (!versionDir.mkdirs()) { + + throw new IOException("Could not create: " + versionDir); + + } + + } + + } + + } + + protected void tearDown() throws Exception { + + super.tearDown(); + + } + + /** + * @throws Throwable + * + * @todo Each 'test' should run an instance of a class which knows how to + * create the appropriate artifacts and how to test them. + */ + public void test_WORM_compatibility_with_JOURNAL_HA_BRANCH() + throws Throwable { + + final Version version = V_0_83_2; + + final File versionDir = new File(artifactDir, version.getVersion()); + + final File artifactFile = new File(versionDir, getName() + + BufferMode.DiskWORM + Journal.Options.JNL); + + if (!artifactFile.exists()) { + + createArtifact(artifactFile); + + } + + verifyArtifact(artifactFile); + + } + + protected void createArtifact(final File artifactFile) throws Throwable { + + if (log.isInfoEnabled()) + log.info("Creating: " + artifactFile); + + final Properties properties = new Properties(); + + properties.setProperty(Journal.Options.FILE, artifactFile.toString()); + + properties.setProperty(Journal.Options.INITIAL_EXTENT, "" + + Journal.Options.minimumInitialExtent); + + final Journal journal = new Journal(properties); + + try { + + final IndexMetadata md = new IndexMetadata(UUID.randomUUID()); + + final IIndex ndx = journal.registerIndex("kb.spo.SPO", md); + + ndx.insert(1,1); + + journal.commit(); + + // reduce to minimum footprint. 
+ journal.truncate(); + + } catch (Throwable t) { + + journal.destroy(); + + throw new RuntimeException(t); + + } finally { + + if (journal.isOpen()) + journal.close(); + + } + + } + + protected void verifyArtifact(final File artifactFile) throws Throwable { + + if (log.isInfoEnabled()) + log.info("Verifying: " + artifactFile); + + final Properties properties = new Properties(); + + properties.setProperty(Journal.Options.FILE, artifactFile.toString()); + + final Journal journal = new Journal(properties); + + try { + + final IIndex ndx = journal.getIndex("kb.spo.SPO"); + + assertNotNull(ndx); + + assertEquals(1,ndx.lookup(1)); + + } finally { + + journal.close(); + + } + + } + +} Property changes on: branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/attr ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/attr:2981-3437,3656-3894 + /trunk/bigdata-jini/src/java/com/bigdata/attr:2981-3437,3656-3894,3896-4059 Property changes on: branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/disco ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/disco:2981-3437,3656-3894 + /trunk/bigdata-jini/src/java/com/bigdata/disco:2981-3437,3656-3894,3896-4059 Property changes on: branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/util/config ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/util/config:2981-3437,3656-3894 + /trunk/bigdata-jini/src/java/com/bigdata/util/config:2981-3437,3656-3894,3896-4059 Property changes on: branches/JOURNAL_HA_BRANCH/bigdata-perf ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-perf:2981-3437,3656-3894 + /trunk/bigdata-perf:2981-3437,3656-3894,3896-4059 Deleted: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2011-01-05 22:42:01 UTC (rev 4059) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -1,98 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Comparator; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.spo.SPOComparator; - -public class ChangeRecord implements IChangeRecord { - - private final ISPO stmt; - - private final ChangeAction action; - -// private final StatementEnum oldType; - - public ChangeRecord(final ISPO stmt, final ChangeAction action) { - -// this(stmt, action, null); -// -// } -// -// public ChangeRecord(final BigdataStatement stmt, final ChangeAction action, -// final StatementEnum oldType) { -// - this.stmt = stmt; - this.action = action; -// this.oldType = oldType; - - } - - public ChangeAction getChangeAction() { - - return action; - - } - -// public StatementEnum getOldStatementType() { -// -// return oldType; -// -// } - - public ISPO getStatement() { - - return stmt; - - } - - @Override - public boolean equals(Object o) { - - if (o == this) - return true; - - if (o == null || o instanceof IChangeRecord == false) - return false; - - final IChangeRecord rec = (IChangeRecord) o; - - final ISPO stmt2 = rec.getStatement(); - - // statements are equal - if (stmt == stmt2 || - (stmt != null && stmt2 != null && 
stmt.equals(stmt2))) { - - // actions are equal - return action == rec.getChangeAction(); - - } - - return false; - - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - - sb.append(action).append(": ").append(stmt); - - return sb.toString(); - - } - - public static final Comparator<IChangeRecord> COMPARATOR = - new Comparator<IChangeRecord>() { - - public int compare(final IChangeRecord r1, final IChangeRecord r2) { - - final ISPO spo1 = r1.getStatement(); - final ISPO spo2 = r2.getStatement(); - - return SPOComparator.INSTANCE.compare(spo1, spo2); - - } - - }; - -} Copied: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java (from rev 4059, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -0,0 +1,98 @@ +package com.bigdata.rdf.changesets; + +import java.util.Comparator; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPOComparator; + +public class ChangeRecord implements IChangeRecord { + + private final ISPO stmt; + + private final ChangeAction action; + +// private final StatementEnum oldType; + + public ChangeRecord(final ISPO stmt, final ChangeAction action) { + +// this(stmt, action, null); +// +// } +// +// public ChangeRecord(final BigdataStatement stmt, final ChangeAction action, +// final StatementEnum oldType) { +// + this.stmt = stmt; + this.action = action; +// this.oldType = oldType; + + } + + public ChangeAction getChangeAction() { + + return action; + + } + +// public StatementEnum getOldStatementType() { +// +// return oldType; +// +// } + + public ISPO getStatement() { + + return stmt; + + } + + @Override + public boolean equals(Object o) { + + if (o == this) + return true; + + if (o == null || o instanceof IChangeRecord == false) + return false; + + final IChangeRecord rec = (IChangeRecord) o; + + final ISPO stmt2 = rec.getStatement(); + + // statements are equal + if (stmt == stmt2 || + (stmt != null && stmt2 != null && stmt.equals(stmt2))) { + + // actions are equal + return action == rec.getChangeAction(); + + } + + return false; + + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append(action).append(": ").append(stmt); + + return sb.toString(); + + } + + public static final Comparator<IChangeRecord> COMPARATOR = + new Comparator<IChangeRecord>() { + + public int compare(final IChangeRecord r1, final IChangeRecord r2) { + + final ISPO spo1 = r1.getStatement(); + final ISPO spo2 = r2.getStatement(); + + return SPOComparator.INSTANCE.compare(spo1, spo2); + + } + + }; + +} Deleted: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2011-01-05 22:42:01 UTC (rev 4059) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -1,38 +0,0 @@ -package com.bigdata.rdf.changesets; - -/** - * Provides detailed information on changes made to statements in the database. 
- * Change records are generated for any statements that are used in - * addStatement() or removeStatements() operations on the SAIL connection, as - * well as any inferred statements that are added or removed as a result of - * truth maintenance when the database has inference enabled. Change records - * will be sent to an instance of this class via the - * {@link #changeEvent(IChangeRecord)} method. These events will - * occur on an ongoing basis as statements are added to or removed from the - * indices. It is the change log's responsibility to collect change records. - * When the transaction is actually committed (or aborted), the change log will - * receive notification via {@link #transactionCommited()} or - * {@link #transactionAborted()}. - */ -public interface IChangeLog { - - /** - * Occurs when a statement add or remove is flushed to the indices (but - * not yet committed). - * - * @param record - * the {@link IChangeRecord} - */ - void changeEvent(final IChangeRecord record); - - /** - * Occurs when the current SAIL transaction is committed. - */ - void transactionCommited(); - - /** - * Occurs if the current SAIL transaction is aborted. - */ - void transactionAborted(); - -} Copied: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java (from rev 4059, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -0,0 +1,38 @@ +package com.bigdata.rdf.changesets; + +/** + * Provides detailed information on changes made to statements in the database. + * Change records are generated for any statements that are used in + * addStatement() or removeStatements() operations on the SAIL connection, as + * well as any inferred statements that are added or removed as a result of + * truth maintenance when the database has inference enabled. Change records + * will be sent to an instance of this class via the + * {@link #changeEvent(IChangeRecord)} method. These events will + * occur on an ongoing basis as statements are added to or removed from the + * indices. It is the change log's responsibility to collect change records. + * When the transaction is actually committed (or aborted), the change log will + * receive notification via {@link #transactionCommited()} or + * {@link #transactionAborted()}. + */ +public interface IChangeLog { + + /** + * Occurs when a statement add or remove is flushed to the indices (but + * not yet committed). + * + * @param record + * the {@link IChangeRecord} + */ + void changeEvent(final IChangeRecord record); + + /** + * Occurs when the current SAIL transaction is committed. + */ + void transactionCommited(); + + /** + * Occurs if the current SAIL transaction is aborted. 
+ */ + void transactionAborted(); + +} Deleted: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2011-01-05 22:42:01 UTC (rev 4059) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -1,120 +0,0 @@ -package com.bigdata.rdf.changesets; - -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.model.StatementEnum; -import com.bigdata.rdf.spo.ISPO; - -/** - * Provides detailed information on changes made to statements in the database. - * Change records are generated for any statements that are used in - * addStatement() or removeStatements() operations on the SAIL connection, as - * well as any inferred statements that are added or removed as a result of - * truth maintenance when the database has inference enabled. - * <p> - * See {@link IChangeLog}. - */ -public interface IChangeRecord { - - /** - * Attempting to add or remove statements can have a number of different - * effects. This enum captures the different actions that can take place as - * a result of trying to add or remove a statement from the database. - */ - public enum ChangeAction { - - /** - * The focus statement was not in the database before and will be - * in the database after the commit. This can be the result of either - * explicit addStatement() operations on the SAIL connection, or from - * new inferences being generated via truth maintenance when the - * database has inference enabled. If the focus statement has a - * statement type of explicit then it was added via an addStatement() - * operation. If the focus statement has a statement type of inferred - * then it was added via truth maintenance. - */ - INSERTED, - - /** - * The focus statement was in the database before and will not - * be in the database after the commit. When the database has inference - * and truth maintenance enabled, the statement that is the focus of - * this change record was either an explicit statement that was the - * subject of a removeStatements() operation on the connection, or it - * was an inferred statement that was removed as a result of truth - * maintenance. Either way, the statement is no longer provable as an - * inference using other statements still in the database after the - * commit. If it were still provable, the explicit statement would have - * had its type changed to inferred, and the inferred statement would - * have remained untouched by truth maintenance. If an inferred - * statement was the subject of a removeStatement() operation on the - * connection it would have resulted in a no-op, since inferences can - * only be removed via truth maintenance. - */ - REMOVED, - - /** - * This change action can only occur when inference and truth - * maintenance are enabled on the database. Sometimes an attempt at - * statement addition or removal via an addStatement() or - * removeStatements() operation on the connection will result in a type - * change rather than an actual assertion or deletion. When in - * inference mode, statements can have one of three statement types: - * explicit, inferred, or axiom (see {@link StatementEnum}). 
There are - * several reasons why a statement will change type rather than be - * asserted or deleted: - * <p> - * <ul> - * <li> A statement is asserted, but already exists in the database as - * an inference or an axiom. The existing statement will have its type - * changed from inference or axiom to explicit. </li> - * <li> An explicit statement is retracted, but is still provable by - * other means. It will have its type changed from explicit to - * inference. </li> - * <li> An explicit statement is retracted, but is one of the axioms - * needed for inference. It will have its type changed from explicit to - * axiom. </li> - * </ul> - */ - UPDATED, - -// /** -// * This change action can occur for one of two reasons: -// * <p> -// * <ul> -// * <li> A statement is asserted, but already exists in the database as -// * an explicit statement. </li> -// * <li> An inferred statement or an axiom is retracted. Only explicit -// * statements can be retracted via removeStatements() operations. </li> -// * </ul> -// */ -// NO_OP - - } - - /** - * Return the ISPO that is the focus of this change record. - * - * @return - * the {@link ISPO} - */ - ISPO getStatement(); - - /** - * Return the change action for this change record. - * - * @return - * the {@link ChangeAction} - */ - ChangeAction getChangeAction(); - -// /** -// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method -// * will return the old statement type of the focus statement. The -// * new statement type is available on the focus statement itself. -// * -// * @return -// * the old statement type of the focus statement -// */ -// StatementEnum getOldStatementType(); - -} Copied: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java (from rev 4059, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -0,0 +1,120 @@ +package com.bigdata.rdf.changesets; + +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.spo.ISPO; + +/** + * Provides detailed information on changes made to statements in the database. + * Change records are generated for any statements that are used in + * addStatement() or removeStatements() operations on the SAIL connection, as + * well as any inferred statements that are added or removed as a result of + * truth maintenance when the database has inference enabled. + * <p> + * See {@link IChangeLog}. + */ +public interface IChangeRecord { + + /** + * Attempting to add or remove statements can have a number of different + * effects. This enum captures the different actions that can take place as + * a result of trying to add or remove a statement from the database. + */ + public enum ChangeAction { + + /** + * The focus statement was not in the database before and will be + * in the database after the commit. This can be the result of either + * explicit addStatement() operations on the SAIL connection, or from + * new inferences being generated via truth maintenance when the + * database has inference enabled. If the focus statement has a + * statement type of explicit then it was added via an addStatement() + * operation. 
If the focus statement has a statement type of inferred + * then it was added via truth maintenance. + */ + INSERTED, + + /** + * The focus statement was in the database before and will not + * be in the database after the commit. When the database has inference + * and truth maintenance enabled, the statement that is the focus of + * this change record was either an explicit statement that was the + * subject of a removeStatements() operation on the connection, or it + * was an inferred statement that was removed as a result of truth + * maintenance. Either way, the statement is no longer provable as an + * inference using other statements still in the database after the + * commit. If it were still provable, the explicit statement would have + * had its type changed to inferred, and the inferred statement would + * have remained untouched by truth maintenance. If an inferred + * statement was the subject of a removeStatement() operation on the + * connection it would have resulted in a no-op, since inferences can + * only be removed via truth maintenance. + */ + REMOVED, + + /** + * This change action can only occur when inference and truth + * maintenance are enabled on the database. Sometimes an attempt at + * statement addition or removal via an addStatement() or + * removeStatements() operation on the connection will result in a type + * change rather than an actual assertion or deletion. When in + * inference mode, statements can have one of three statement types: + * explicit, inferred, or axiom (see {@link StatementEnum}). There are + * several reasons why a statement will change type rather than be + * asserted or deleted: + * <p> + * <ul> + * <li> A statement is asserted, but already exists in the database as + * an inference or an axiom. The existing statement will have its type + * changed from inference or axiom to explicit. </li> + * <li> An explicit statement is retracted, but is still provable by + * other means. It will have its type changed from explicit to + * inference. </li> + * <li> An explicit statement is retracted, but is one of the axioms + * needed for inference. It will have its type changed from explicit to + * axiom. </li> + * </ul> + */ + UPDATED, + +// /** +// * This change action can occur for one of two reasons: +// * <p> +// * <ul> +// * <li> A statement is asserted, but already exists in the database as +// * an explicit statement. </li> +// * <li> An inferred statement or an axiom is retracted. Only explicit +// * statements can be retracted via removeStatements() operations. </li> +// * </ul> +// */ +// NO_OP + + } + + /** + * Return the ISPO that is the focus of this change record. + * + * @return + * the {@link ISPO} + */ + ISPO getStatement(); + + /** + * Return the change action for this change record. + * + * @return + * the {@link ChangeAction} + */ + ChangeAction getChangeAction(); + +// /** +// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method +// * will return the old statement type of the focus statement. The +// * new statement type is available on the focus statement itself. 
+// * +// * @return +// * the old statement type of the focus statement +// */ +// StatementEnum getOldStatementType(); + +} Deleted: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2011-01-05 22:42:01 UTC (rev 4059) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -1,163 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.Map; -import org.apache.log4j.Logger; -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.rdf.store.BigdataStatementIterator; -import com.bigdata.striterator.ChunkedArrayIterator; - -/** - * This is a very simple implementation of a change log. NOTE: This is not - * a particularly great implementation. First of all it ends up storing - * two copies of the change set. Secondly it needs to be smarter about - * concurrency, or maybe we can be smart about it when we do the - * implementation on the other side (the SAIL connection can just write - * change events to a buffer and then the buffer can be drained by - * another thread that doesn't block the actual read/write operations, - * although then we need to be careful not to issue the committed() - * notification before the buffer is drained). - * - * @author mike - * - */ -public class InMemChangeLog implements IChangeLog { - - protected static final Logger log = Logger.getLogger(InMemChangeLog.class); - - /** - * Running tally of new changes since the last commit notification. - */ - private final Map<ISPO,IChangeRecord> changeSet = - new HashMap<ISPO, IChangeRecord>(); - - /** - * Keep a record of the change set as of the last commit. - */ - private final Map<ISPO,IChangeRecord> committed = - new HashMap<ISPO, IChangeRecord>(); - - /** - * See {@link IChangeLog#changeEvent(IChangeRecord)}. - */ - public synchronized void changeEvent(final IChangeRecord record) { - - if (log.isInfoEnabled()) - log.info(record); - - changeSet.put(record.getStatement(), record); - - } - - /** - * See {@link IChangeLog#transactionCommited()}. - */ - public synchronized void transactionCommited() { - - if (log.isInfoEnabled()) - log.info("transaction committed"); - - committed.clear(); - - committed.putAll(changeSet); - - changeSet.clear(); - - } - - /** - * See {@link IChangeLog#transactionAborted()}. - */ - public synchronized void transactionAborted() { - - if (log.isInfoEnabled()) - log.info("transaction aborted"); - - changeSet.clear(); - - } - - /** - * Return the change set as of the last commmit point. - * - * @return - * a collection of {@link IChangeRecord}s as of the last commit - * point - */ - public Collection<IChangeRecord> getLastCommit() { - - return committed.values(); - - } - - /** - * Return the change set as of the last commmit point, using the supplied - * database to resolve ISPOs to BigdataStatements. - * - * @return - * a collection of {@link IChangeRecord}s as of the last commit - * point - */ - public Collection<IChangeRecord> getLastCommit(final AbstractTripleStore db) { - - return resolve(db, committed.values()); - - } - - /** - * Use the supplied database to turn a set of ISPO change records into - * BigdataStatement change records. 
BigdataStatements also implement - * ISPO, the difference being that BigdataStatements also contain - * materialized RDF terms for the 3 (or 4) positions, in addition to just - * the internal identifiers (IVs) for those terms. - * - * @param db - * the database containing the lexicon needed to materialize - * the BigdataStatement objects - * @param unresolved - * the ISPO change records that came from IChangeLog notification - * events - * @return - * the fully resolves BigdataStatement change records - */ - private Collection<IChangeRecord> resolve(final AbstractTripleStore db, - final Collection<IChangeRecord> unresolved) { - - final Collection<IChangeRecord> resolved = - new LinkedList<IChangeRecord>(); - - // collect up the ISPOs out of the unresolved change records - final ISPO[] spos = new ISPO[unresolved.size()]; - int i = 0; - for (IChangeRecord rec : unresolved) { - spos[i++] = rec.getStatement(); - } - - // use the database to resolve them into BigdataStatements - final BigdataStatementIterator it = - db.asStatementIterator( - new ChunkedArrayIterator<ISPO>(i, spos, null/* keyOrder */)); - - /* - * the BigdataStatementIterator will produce BigdataStatement objects - * in the same order as the original ISPO array - */ - for (IChangeRecord rec : unresolved) { - - final BigdataStatement stmt = it.next(); - - resolved.add(new ChangeRecord(stmt, rec.getChangeAction())); - - } - - return resolved; - - } - - - -} Copied: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java (from rev 4059, trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2011-01-06 20:13:43 UTC (rev 4061) @@ -0,0 +1,163 @@ +package com.bigdata.rdf.changesets; + +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; +import org.apache.log4j.Logger; +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BigdataStatementIterator; +import com.bigdata.striterator.ChunkedArrayIterator; + +/** + * This is a very simple implementation of a change log. NOTE: This is not + * a particularly great implementation. First of all it ends up storing + * two copies of the change set. Secondly it needs to be smarter about + * concurrency, or maybe we can be smart about it when we do the + * implementation on the other side (the SAIL connection can just write + * change events to a buffer and then the buffer can be drained by + * another thread that doesn't block the actual read/write operations, + * although then we need to be careful not to issue the committed() + * notification before the buffer is drained). + * + * @author mike + * + */ +public class InMemChangeLog implements IChangeLog { + + protected static final Logger log = Logger.getLogger(InMemChangeLog.class); + + /** + * Running tally of new changes since the last commit notification. + */ + private final Map<ISPO,IChangeRecord> changeSet = + new HashMap<ISPO, IChangeRecord>(); + + /** + * Keep a record of the change set as of the last commit. 
+ */ + private final Map<ISPO,IChangeRecord> committed = + new HashMap<ISPO, IChangeRecord>(); + + /** + * See {@link IChangeLog#changeEvent(IChangeRecord)}. + */ + public synchronized void changeEvent(final IChangeRecord record) { + + if (log.isInfoEnabled()) + log.info(record); + + changeSet.put(record.getStatement(), record); + + } + + /** + * See {@link IChangeLog#transactionCommited()}. + */ + public synchronized void transactionCommited() { + + if (log.isInfoEnabled()) + log.info("transaction committed"); + + committed.clear(); + + committed.putAll(changeSet); + + changeSet.clear(); + + } + + /** + * See {@link IChangeLog#transactionAborted()}. + */ + public synchronized void transactionAborted() { + + if (log.isInfoEnabled()) + log.info("transaction aborted"); + + changeSet.clear(); + + } + + /** + * Return the change set as of the last commmit point. + * + * @return + * a collection of {@link IChangeRecord}s as of the last commit + * point + */ + public Collection<IChangeRecord> getLastCommit() { + + return committed.values(); + + } + + /** + * Return the change set as of the last commmit point, using the supplied + * database to resolve ISPOs to BigdataStatements. + * + * @return + * a collection of {@link IChangeRecord}s as of the last commit + * point + */ + public Collection<IChangeRecord> getLastCommit(final AbstractTripleStore db) { + + return resolve(db, committed.values()); + + } + + /** + * Use the supplied database to turn a set of ISPO change records into + * BigdataStatement change records. BigdataStatements also implement + * ISPO, the difference being that BigdataStatements also contain + * materialized RDF terms for the 3 (or 4) positions, in addition to just + * the internal identifiers (IVs) for those terms. + * + * @param db + * the database containing the lexicon needed to materialize + * the BigdataStatement objects + * @param unresolved + * the ISP... [truncated message content] |
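The InMemChangeLog above is easiest to understand from the caller's side. A minimal usage sketch (how the log gets attached to the SAIL connection is not part of this diff, so that step is only indicated in a comment):

import com.bigdata.rdf.changesets.IChangeRecord;
import com.bigdata.rdf.changesets.InMemChangeLog;

public class ChangeLogExample {
    public static void main(String[] args) {
        final InMemChangeLog changeLog = new InMemChangeLog();
        // ... attach changeLog to a SAIL connection (attachment API not shown
        // in this diff); during a transaction the connection invokes
        // changeLog.changeEvent(record) once per added/removed statement ...
        // On commit, the running change set becomes the "last commit" set:
        changeLog.transactionCommited();
        for (IChangeRecord rec : changeLog.getLastCommit()) {
            System.out.println(rec); // one record per changed statement
        }
    }
}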
From: <tho...@us...> - 2011-01-06 19:39:03
Revision: 4060 http://bigdata.svn.sourceforge.net/bigdata/?rev=4060&view=rev Author: thompsonbry Date: 2011-01-06 19:38:57 +0000 (Thu, 06 Jan 2011) Log Message: ----------- Fixed broken assert in ISPO. Added isTruthMaintenance() to BigdataSail. Modified Paths: -------------- trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java 2011-01-05 22:42:01 UTC (rev 4059) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java 2011-01-06 19:38:57 UTC (rev 4060) @@ -332,7 +332,7 @@ public static ModifiedEnum[] fromBooleans(final boolean[] b, final int n) { - assert n < b.length && n % 2 == 0; + assert n <= b.length && n % 2 == 0 : "n="+n+", b.length="+b.length; final ModifiedEnum[] m = new ModifiedEnum[n/2]; for (int i = 0; i < n; i+=2) { Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-01-05 22:42:01 UTC (rev 4059) +++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-01-06 19:38:57 UTC (rev 4060) @@ -426,6 +426,18 @@ } /** + * Return <code>true</code> if the SAIL is using automated truth + * maintenance. + * + * @see Options#TRUTH_MAINTENANCE + */ + public boolean isTruthMaintenance() { + + return truthMaintenance; + + } + + /** * Return <code>true</code> iff star joins are enabled. */ public boolean isStarJoins() {
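The assert fix above is a boundary-condition change: callers may legitimately pass n == b.length when every slot of the boolean array is in use. A minimal illustration (assuming ModifiedEnum is the nested type in ISPO, as the diff suggests; run with -ea so the assert is active):

import com.bigdata.rdf.spo.ISPO.ModifiedEnum;

public class AssertFixExample {
    public static void main(String[] args) {
        // The booleans are consumed in pairs, so n must be even and may
        // equal b.length. The old assert (n < b.length) rejected this
        // valid call; the fixed assert (n <= b.length) accepts it.
        final boolean[] b = { true, false, true, true };
        final ModifiedEnum[] m = ModifiedEnum.fromBooleans(b, b.length);
        System.out.println(m.length); // 2 (one ModifiedEnum per pair)
    }
}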
From: <mrp...@us...> - 2011-01-05 22:42:08
Revision: 4059 http://bigdata.svn.sourceforge.net/bigdata/?rev=4059&view=rev Author: mrpersonick Date: 2011-01-05 22:42:01 +0000 (Wed, 05 Jan 2011) Log Message: ----------- added support for inline mathematical operators Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestInlineConstraints.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNestedOptionals.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2011-01-05 18:31:58 UTC (rev 4058) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2011-01-05 22:42:01 UTC (rev 4059) @@ -31,6 +31,9 @@ import java.math.BigInteger; import java.util.ArrayList; import java.util.UUID; + +import org.openrdf.query.algebra.MathExpr.MathOp; + import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.keys.KeyBuilder; import com.bigdata.rawstore.Bytes; @@ -162,6 +165,174 @@ } + public static final IV numericalMath(final IV iv1, final IV iv2, + final MathOp op) { + + if (!iv1.isInline()) + throw new IllegalArgumentException( + "left term is not inline: left=" + iv1 + ", right=" + iv2); + + if (!iv2.isInline()) + throw new IllegalArgumentException( + "right term is not inline: left=" + iv1 + ", right=" + iv2); + + if (!iv1.isLiteral()) + throw new IllegalArgumentException( + "left term is not literal: left=" + iv1 + ", right=" + iv2); + + if (!iv2.isLiteral()) + throw new IllegalArgumentException( + "right term is not literal: left=" + iv1 + ", right=" + iv2); + + final DTE dte1 = iv1.getDTE(); + final DTE dte2 = iv2.getDTE(); + + if (!dte1.isNumeric()) + throw new IllegalArgumentException( + "right term is not numeric: left=" + iv1 + ", right=" + iv2); + + if (!dte2.isNumeric()) + throw new IllegalArgumentException( + "left term is not numeric: left=" + iv1 + ", right=" + iv2); + + final AbstractLiteralIV num1 = (AbstractLiteralIV) iv1; + final AbstractLiteralIV num2 = (AbstractLiteralIV) iv2; + + // if one's a BigDecimal we should use the BigDecimal comparator for both + if (dte1 == DTE.XSDDecimal || dte2 == DTE.XSDDecimal) { + return numericalMath(num1.decimalValue(), num2.decimalValue(), op); + } + + // same for BigInteger + if (dte1 == DTE.XSDInteger || dte2 == DTE.XSDInteger) { + return numericalMath(num1.integerValue(), num2.integerValue(), op); + } + + // fixed length numerics + if (dte1.isFloatingPointNumeric() || dte2.isFloatingPointNumeric()) { + // non-BigDecimal floating points + if (dte1 == DTE.XSDFloat && dte2 == DTE.XSDFloat) + return numericalMath(num1.floatValue(), num2.floatValue(), op); + else + return numericalMath(num1.doubleValue(), num2.doubleValue(), op); + } else { + // non-BigInteger integers + if (dte1 == DTE.XSDInt && dte2 == DTE.XSDInt) + return numericalMath(num1.intValue(), num2.intValue(), op); + else + return numericalMath(num1.longValue(), 
num2.longValue(), op); + } + + } + + private static final IV numericalMath(final BigDecimal left, + final BigDecimal right, final MathOp op) { + + switch(op) { + case PLUS: + return new XSDDecimalIV(left.add(right)); + case MINUS: + return new XSDDecimalIV(left.subtract(right)); + case MULTIPLY: + return new XSDDecimalIV(left.multiply(right)); + case DIVIDE: + return new XSDDecimalIV(left.divide(right)); + default: + throw new UnsupportedOperationException(); + } + + } + + private static final IV numericalMath(final BigInteger left, + final BigInteger right, final MathOp op) { + + switch(op) { + case PLUS: + return new XSDIntegerIV(left.add(right)); + case MINUS: + return new XSDIntegerIV(left.subtract(right)); + case MULTIPLY: + return new XSDIntegerIV(left.multiply(right)); + case DIVIDE: + return new XSDIntegerIV(left.divide(right)); + default: + throw new UnsupportedOperationException(); + } + + } + + private static final IV numericalMath(final float left, + final float right, final MathOp op) { + + switch(op) { + case PLUS: + return new XSDFloatIV(left+right); + case MINUS: + return new XSDFloatIV(left-right); + case MULTIPLY: + return new XSDFloatIV(left*right); + case DIVIDE: + return new XSDFloatIV(left/right); + default: + throw new UnsupportedOperationException(); + } + + } + + private static final IV numericalMath(final double left, + final double right, final MathOp op) { + + switch(op) { + case PLUS: + return new XSDDoubleIV(left+right); + case MINUS: + return new XSDDoubleIV(left-right); + case MULTIPLY: + return new XSDDoubleIV(left*right); + case DIVIDE: + return new XSDDoubleIV(left/right); + default: + throw new UnsupportedOperationException(); + } + + } + + private static final IV numericalMath(final int left, + final int right, final MathOp op) { + + switch(op) { + case PLUS: + return new XSDIntIV(left+right); + case MINUS: + return new XSDIntIV(left-right); + case MULTIPLY: + return new XSDIntIV(left*right); + case DIVIDE: + return new XSDIntIV(left/right); + default: + throw new UnsupportedOperationException(); + } + + } + + private static final IV numericalMath(final long left, + final long right, final MathOp op) { + + switch(op) { + case PLUS: + return new XSDLongIV(left+right); + case MINUS: + return new XSDLongIV(left-right); + case MULTIPLY: + return new XSDLongIV(left*right); + case DIVIDE: + return new XSDLongIV(left/right); + default: + throw new UnsupportedOperationException(); + } + + } + /** * Used to test whether a given value constant can be used in an inline * filter or not. If so, we can use one of the inline constraints Added: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2011-01-05 22:42:01 UTC (rev 4059) @@ -0,0 +1,134 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.rdf.internal.constraints; + +import java.util.LinkedHashMap; +import java.util.Map; + +import org.openrdf.query.algebra.Compare.CompareOp; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstant; +import com.bigdata.bop.IVariable; +import com.bigdata.bop.IVariableOrConstant; +import com.bigdata.bop.NV; +import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.constraint.BOpConstraint; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.internal.IVUtility; +import com.bigdata.rdf.internal.constraints.MathBOp.Annotations; + +/** + * Use inline terms to perform numerical comparison operations. + * + * @see IVUtility#numericalCompare(IV, IV) + */ +public class CompareBOp extends BOpConstraint { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public interface Annotations extends PipelineOp.Annotations { + + /** + * The compare operator + */ + String OP = CompareBOp.class.getName() + ".op"; + + } + + /** + * Required shallow copy constructor. + */ + public CompareBOp(final BOp[] values, + final Map<String, Object> annotations) { + super(values, annotations); + } + + /** + * Required deep copy constructor. + */ + public CompareBOp(final CompareBOp op) { + super(op); + } + + public CompareBOp(final IVariableOrConstant<IV> left, + final IVariableOrConstant<IV> right, final CompareOp op) { + + super(new BOp[] { left, right }, NV.asMap(new NV(Annotations.OP, op))); + + if (left == null || right == null || op == null) + throw new IllegalArgumentException(); + + } + + public boolean accept(final IBindingSet s) { + + final IV left = ((IVariableOrConstant<IV>) get(0)).get(s); + final IV right = ((IVariableOrConstant<IV>) get(1)).get(s); + + if (left == null || right == null) + return true; // not yet bound. + + if (IVUtility.canNumericalCompare(left) && + IVUtility.canNumericalCompare(right)) { + + return _accept(IVUtility.numericalCompare(left, right)); + + } else { + + return _accept(left.compareTo(right)); + + } + + } + + protected boolean _accept(final int compare) { + + final CompareOp op = (CompareOp) getProperty(Annotations.OP); + + switch(op) { + case EQ: + return compare == 0; + case NE: + return compare != 0; + case GT: + return compare > 0; + case GE: + return compare >= 0; + case LT: + return compare < 0; + case LE: + return compare <= 0; + default: + throw new UnsupportedOperationException(); + } + + } + +} Added: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java 2011-01-05 22:42:01 UTC (rev 4059) @@ -0,0 +1,216 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... 
+ +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.internal.constraints; + +import java.util.LinkedHashMap; +import java.util.Map; + +import org.openrdf.query.algebra.Compare.CompareOp; +import org.openrdf.query.algebra.MathExpr.MathOp; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.Constant; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstant; +import com.bigdata.bop.IVariable; +import com.bigdata.bop.IVariableOrConstant; +import com.bigdata.bop.ImmutableBOp; +import com.bigdata.bop.NV; +import com.bigdata.bop.Constant.Annotations; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.internal.IVUtility; + +/** + * A math expression involving a left and right IVariableOrConstant operand. + */ +final public class MathBOp extends ImmutableBOp + implements IVariableOrConstant<IV> { + + /** + * + */ + private static final long serialVersionUID = 9136864442064392445L; + + public interface Annotations extends ImmutableBOp.Annotations { + + /** + * The {@link IVariable} which is bound to that constant value + * (optional). + */ + String OP = MathBOp.class.getName() + ".op"; + + } + + final public boolean isVar() { + + return ((IVariableOrConstant) get(0)).isVar() || + ((IVariableOrConstant) get(1)).isVar(); + + } + + final public boolean isConstant() { + + return !isVar(); + + } + + /** + * Required deep copy constructor. + * + * @param op + */ + public MathBOp(final MathBOp op) { + + super(op); + + } + + public MathBOp(final IVariableOrConstant<IV> left, + final IVariableOrConstant<IV> right, final MathOp op) { + + super(new BOp[] { left, right }, NV.asMap(new NV(Annotations.OP, op))); + + if (left == null || right == null || op == null) + throw new IllegalArgumentException(); + + } + +// /** +// * Clone is overridden to reduce heap churn. +// */ +// final public Math clone() { +// +// return this; +// +// } + + public IVariableOrConstant<IV> left() { + return (IVariableOrConstant<IV>) get(0); + } + + public IVariableOrConstant<IV> right() { + return (IVariableOrConstant<IV>) get(1); + } + + public MathOp op() { + return (MathOp) getRequiredProperty(Annotations.OP); + } + + public String toString() { + + final StringBuilder sb = new StringBuilder(); + sb.append(op()); + sb.append("(").append(left()).append(", ").append(right()).append(")"); + return sb.toString(); + + } + + final public boolean equals(final MathBOp m) { + + if (m == null) + return false; + + if (this == m) + return true; + + return op().equals(m.op()) && + left().equals(m.left()) && + right().equals(m.right()); + + } + + final public boolean equals(final IVariableOrConstant<IV> o) { + + if(!(o instanceof MathBOp)) { + + // incomparable types. + return false; + + } + + return equals((MathBOp) o); + + } + + + /** + * Caches the hash code. 
+ */ + private int hash = 0; + + public int hashCode() { + + int h = hash; + + if (h == 0) { + + final int n = arity(); + + for (int i = 0; i < n; i++) { + + h = 31 * h + get(i).hashCode(); + + } + + h = 31 * h + op().hashCode(); + + hash = h; + + } + + return h; + + } + + final public IV get() { + + final IV left = left().get(); + final IV right = right().get(); + + if (left == null || right == null) + return null; + + return IVUtility.numericalMath(left, right, op()); + + } + + final public IV get(final IBindingSet bindingSet) { + + final IV left = left().get(bindingSet); + final IV right = right().get(bindingSet); + + if (left == null || right == null) + return null; + + return IVUtility.numericalMath(left, right, op()); + + } + + final public String getName() { + + throw new UnsupportedOperationException(); + + } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestInlineConstraints.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestInlineConstraints.java 2011-01-05 18:31:58 UTC (rev 4058) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestInlineConstraints.java 2011-01-05 22:42:01 UTC (rev 4059) @@ -27,8 +27,9 @@ package com.bigdata.rdf.internal.constraints; -import java.util.Properties; import org.openrdf.model.vocabulary.RDF; +import org.openrdf.query.algebra.Compare.CompareOp; +import org.openrdf.query.algebra.MathExpr.MathOp; import com.bigdata.bop.Constant; import com.bigdata.bop.IBindingSet; @@ -140,7 +141,7 @@ }, // constraints on the rule. new IConstraint[] { - new InlineGT(a, _35.getIV()) + new CompareBOp(a, new Constant<IV>(_35.getIV()), CompareOp.GT) }); try { @@ -242,7 +243,7 @@ }, // constraints on the rule. new IConstraint[] { - new InlineGE(a, _35.getIV()) + new CompareBOp(a, new Constant<IV>(_35.getIV()), CompareOp.GE) }); try { @@ -346,7 +347,7 @@ }, // constraints on the rule. new IConstraint[] { - new InlineLT(a, _35.getIV()) + new CompareBOp(a, new Constant<IV>(_35.getIV()), CompareOp.LT) }); if (log.isInfoEnabled()) @@ -454,7 +455,7 @@ }, // constraints on the rule. 
new IConstraint[] { - new InlineLE(a, _35.getIV()) + new CompareBOp(a, new Constant<IV>(_35.getIV()), CompareOp.LE) }); if (log.isInfoEnabled()) @@ -502,6 +503,117 @@ } + public void testMath() { + + // store with no owl:sameAs closure + AbstractTripleStore db = getStore(); + + // do not run if we are not inlining + if (!db.getLexiconRelation().isInlineLiterals()) { + return; + } + + try { + + BigdataValueFactory vf = db.getValueFactory(); + + final BigdataURI A = vf.createURI("http://www.bigdata.com/A"); + final BigdataURI B = vf.createURI("http://www.bigdata.com/B"); + final BigdataURI C = vf.createURI("http://www.bigdata.com/C"); + final BigdataURI D = vf.createURI("http://www.bigdata.com/D"); + final BigdataURI X = vf.createURI("http://www.bigdata.com/X"); + final BigdataURI AGE = vf.createURI("http://www.bigdata.com/AGE"); + final BigdataLiteral _5 = vf.createLiteral((double) 5); + final BigdataLiteral _30 = vf.createLiteral((double) 30); + final BigdataLiteral _25 = vf.createLiteral((double) 25); + final BigdataLiteral _35 = vf.createLiteral((long) 35); + final BigdataLiteral _45 = vf.createLiteral((long) 45); + + db.addTerms( new BigdataValue[] { A, B, C, X, AGE, _25, _35, _45, D, _5, _30 } ); + + { + StatementBuffer buffer = new StatementBuffer + ( db, 100/* capacity */ + ); + + buffer.add(A, RDF.TYPE, X); + buffer.add(A, AGE, _25); + buffer.add(B, RDF.TYPE, X); + buffer.add(B, AGE, _45); + buffer.add(C, RDF.TYPE, X); + buffer.add(C, AGE, _35); + buffer.add(D, AGE, _30); + + // write statements on the database. + buffer.flush(); + + } + + if (log.isInfoEnabled()) + log.info("\n" +db.dumpStore(true, true, false)); + + { // works great + + final String SPO = db.getSPORelation().getNamespace(); + final IVariable<IV> s = Var.var("s"); + final IConstant<IV> type = new Constant<IV>(db.getIV(RDF.TYPE)); + final IConstant<IV> x = new Constant<IV>(X.getIV()); + final IConstant<IV> age = new Constant<IV>(AGE.getIV()); + final IVariable<IV> a = Var.var("a"); + final IConstant<IV> d = new Constant<IV>(D.getIV()); + final IVariable<IV> dAge = Var.var("dAge"); + + final IRule rule = + new Rule("test_math", null, // head + new IPredicate[] { + new SPOPredicate(SPO, d, age, dAge), + new SPOPredicate(SPO, s, type, x), + new SPOPredicate(SPO, s, age, a) + }, + // constraints on the rule. + new IConstraint[] { + new CompareBOp(a, new MathBOp(dAge, new Constant<IV>(_5.getIV()), MathOp.PLUS), CompareOp.GT) + }); + + try { + + int numSolutions = 0; + + IChunkedOrderedIterator<ISolution> solutions = runQuery(db, rule); + + while (solutions.hasNext()) { + + ISolution solution = solutions.next(); + + IBindingSet bs = solution.getBindingSet(); + + System.err.println(bs); + + assertEquals(bs.get(s).get(), B.getIV()); + assertEquals(bs.get(a).get(), _45.getIV()); + + numSolutions++; + + } + + assertEquals("wrong # of solutions", 1, numSolutions); + + } catch(Exception ex) { + + ex.printStackTrace(); + + } + + } + + } finally { + + db.__tearDownUnitTest(); + + } + + } + private IChunkedOrderedIterator<ISolution> runQuery(AbstractTripleStore db, IRule rule) throws Exception { // run the query as a native rule. 
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java 2011-01-05 18:31:58 UTC (rev 4058) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java 2011-01-05 22:42:01 UTC (rev 4059) @@ -25,10 +25,13 @@ import org.openrdf.query.Dataset; import org.openrdf.query.QueryEvaluationException; import org.openrdf.query.algebra.Compare; +import org.openrdf.query.algebra.Compare.CompareOp; import org.openrdf.query.algebra.Filter; import org.openrdf.query.algebra.Group; import org.openrdf.query.algebra.Join; import org.openrdf.query.algebra.LeftJoin; +import org.openrdf.query.algebra.MathExpr; +import org.openrdf.query.algebra.MathExpr.MathOp; import org.openrdf.query.algebra.MultiProjection; import org.openrdf.query.algebra.Or; import org.openrdf.query.algebra.Order; @@ -39,14 +42,13 @@ import org.openrdf.query.algebra.QueryRoot; import org.openrdf.query.algebra.SameTerm; import org.openrdf.query.algebra.StatementPattern; +import org.openrdf.query.algebra.StatementPattern.Scope; import org.openrdf.query.algebra.TupleExpr; import org.openrdf.query.algebra.UnaryTupleOperator; import org.openrdf.query.algebra.Union; import org.openrdf.query.algebra.ValueConstant; import org.openrdf.query.algebra.ValueExpr; import org.openrdf.query.algebra.Var; -import org.openrdf.query.algebra.Compare.CompareOp; -import org.openrdf.query.algebra.StatementPattern.Scope; import org.openrdf.query.algebra.evaluation.impl.EvaluationStrategyImpl; import org.openrdf.query.algebra.evaluation.iterator.FilterIterator; import org.openrdf.query.algebra.helpers.QueryModelVisitorBase; @@ -57,11 +59,11 @@ import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstraint; import com.bigdata.bop.IPredicate; +import com.bigdata.bop.IPredicate.Annotations; import com.bigdata.bop.IVariable; import com.bigdata.bop.IVariableOrConstant; import com.bigdata.bop.NV; import com.bigdata.bop.PipelineOp; -import com.bigdata.bop.IPredicate.Annotations; import com.bigdata.bop.ap.Predicate; import com.bigdata.bop.constraint.EQ; import com.bigdata.bop.constraint.EQConstant; @@ -77,12 +79,14 @@ import com.bigdata.rdf.internal.DummyIV; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.internal.IVUtility; +import com.bigdata.rdf.internal.constraints.CompareBOp; import com.bigdata.rdf.internal.constraints.InlineEQ; import com.bigdata.rdf.internal.constraints.InlineGE; import com.bigdata.rdf.internal.constraints.InlineGT; import com.bigdata.rdf.internal.constraints.InlineLE; import com.bigdata.rdf.internal.constraints.InlineLT; import com.bigdata.rdf.internal.constraints.InlineNE; +import com.bigdata.rdf.internal.constraints.MathBOp; import com.bigdata.rdf.lexicon.LexiconRelation; import com.bigdata.rdf.model.BigdataValue; import com.bigdata.rdf.sail.BigdataSail.Options; @@ -1589,70 +1593,85 @@ compare.getOperator()); } + /** + * FIXME: implement compare two variables and compare MathExpr + */ private IConstraint generateConstraint(ValueExpr left, ValueExpr right, CompareOp operator) { - IVariable<IV> var = null; - BigdataValue constant = null; + IVariableOrConstant<IV> iv1, iv2; + if (left instanceof Var) { - var = com.bigdata.bop.Var.var(((Var) left).getName()); + iv1 = com.bigdata.bop.Var.var(((Var) left).getName()); } else if (left instanceof 
ValueConstant) { - constant = (BigdataValue) ((ValueConstant) left).getValue(); + final IV iv = ((BigdataValue) ((ValueConstant) left).getValue()).getIV(); + if (iv == null) + return null; + iv1 = new Constant<IV>(iv); + } else if (left instanceof MathExpr) { + iv1 = generateMath((MathExpr) left); + if (iv1 == null) + return null; } else { return null; } + if (right instanceof Var) { - var = com.bigdata.bop.Var.var(((Var) right).getName()); + iv2 = com.bigdata.bop.Var.var(((Var) right).getName()); } else if (right instanceof ValueConstant) { - constant = (BigdataValue) ((ValueConstant) right).getValue(); + final IV iv = ((BigdataValue) ((ValueConstant) right).getValue()).getIV(); + if (iv == null) + return null; + iv2 = new Constant<IV>(iv); + } else if (right instanceof MathExpr) { + iv2 = generateMath((MathExpr) right); + if (iv2 == null) + return null; } else { return null; } - if (log.isDebugEnabled()) { - log.debug("var: " + var); - log.debug("constant: " + constant); - log.debug("constant.getIV(): " + constant.getIV()); - } - if (var == null || constant == null || constant.getIV() == null) { - if (log.isDebugEnabled()) { - log.debug("left: " + left); - log.debug("right: " + right); - } + + return new CompareBOp(iv1, iv2, operator); + + } + + private MathBOp generateMath(final MathExpr mathExpr) { + final ValueExpr left = mathExpr.getLeftArg(); + final ValueExpr right = mathExpr.getRightArg(); + final MathOp op = mathExpr.getOperator(); + + IVariableOrConstant<IV> iv1, iv2; + + if (left instanceof Var) { + iv1 = com.bigdata.bop.Var.var(((Var) left).getName()); + } else if (left instanceof ValueConstant) { + final IV iv = ((BigdataValue) ((ValueConstant) left).getValue()).getIV(); + if (iv == null) + return null; + iv1 = new Constant<IV>(iv); + } else if (left instanceof MathExpr) { + iv1 = generateMath((MathExpr) left); + if (iv1 == null) + return null; + } else { return null; } - final IV iv = constant.getIV(); - // we can do equals, not equals - if (inlineTerms && IVUtility.canNumericalCompare(iv)) { - if (log.isInfoEnabled()) { - log.debug("inline constant, using inline numerical comparison: " - + iv); - } - try { - switch (operator) { - case GT: - return new InlineGT(var, iv); - case GE: - return new InlineGE(var, iv); - case LT: - return new InlineLT(var, iv); - case LE: - return new InlineLE(var, iv); - case EQ: - return new InlineEQ(var, iv); - case NE: - return new InlineNE(var, iv); - default: - return null; - } - } catch (Exception ex) { - return null; - } - } else if (operator == CompareOp.EQ) { - return new EQConstant(var, new Constant(iv)); - } else if (operator == CompareOp.NE) { - return new NEConstant(var, new Constant(iv)); + + if (right instanceof Var) { + iv2 = com.bigdata.bop.Var.var(((Var) right).getName()); + } else if (right instanceof ValueConstant) { + final IV iv = ((BigdataValue) ((ValueConstant) right).getValue()).getIV(); + if (iv == null) + return null; + iv2 = new Constant<IV>(iv); + } else if (right instanceof MathExpr) { + iv2 = generateMath((MathExpr) right); + if (iv2 == null) + return null; } else { return null; } + + return new MathBOp(iv1, iv2, op); } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNestedOptionals.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNestedOptionals.java 2011-01-05 18:31:58 UTC (rev 4058) +++ 
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNestedOptionals.java 2011-01-05 22:42:01 UTC (rev 4059) @@ -251,7 +251,7 @@ } - private void __testNestedOptionals1() throws Exception { + public void testNestedOptionals1() throws Exception { final BigdataSail sail = getSail(); sail.initialize(); @@ -333,11 +333,11 @@ final Projection p = (Projection) root.getArg(); final LeftJoin leftJoin = (LeftJoin) p.getArg(); - final List<Tail> tails = collectTails(leftJoin); + final List<Op> tails = collectTails(leftJoin); if (INFO) { System.err.println(query); - for (Tail t : tails) { + for (Op t : tails) { System.err.println(t); } } @@ -355,7 +355,7 @@ } - private void __testNestedOptionals2() throws Exception { + public void testNestedOptionals2() throws Exception { final BigdataSail sail = getSail(); sail.initialize(); @@ -446,7 +446,7 @@ " ?s <"+RDF.TYPE+"> <"+T4+"> .\n" + // tail=A, group=1, parent=0 " ?s <"+RDF.TYPE+"> <"+T5+"> .\n" + // tail=B, group=1, parent=0 " ?s <"+P4+"> ?p4 .\n" + // tail=C, group=1, parent=0 - " FILTER ( ?p4 > 30 ) .\n" + + " FILTER ( ?p4 > (?p1*?p0+10+20) ) .\n" + " OPTIONAL { ?s <"+P5+"> ?p5 . }\n" + // tail=D, group=2, parent=1 " OPTIONAL { ?s <"+P6+"> ?p6 . }\n" + // tail=E, group=3, parent=1 " }\n" + @@ -490,11 +490,11 @@ final Projection p = (Projection) root.getArg(); final LeftJoin leftJoin = (LeftJoin) p.getArg(); - final List<Tail> tails = collectTails(leftJoin); + final List<Op> tails = collectTails(leftJoin); if (INFO) { System.err.println(query); - for (Tail t : tails) { + for (Op t : tails) { System.err.println(t); } } @@ -513,9 +513,9 @@ } - private List<Tail> collectTails(final LeftJoin root) { + private List<Op> collectTails(final LeftJoin root) { - final List<Tail> tails = new LinkedList<Tail>(); + final List<Op> tails = new LinkedList<Op>(); log.info("\n"+root); @@ -530,11 +530,17 @@ return ++group; } - private void collectTails(final List<Tail> tails, final LeftJoin leftJoin, + private void collectTails(final List<Op> tails, final LeftJoin leftJoin, final boolean rslj, final int g, final int pg) { - final ValueExpr condition = leftJoin.getCondition(); + final ValueExpr ve = leftJoin.getCondition(); // conditional for tails in this group + if (ve != null) { + final Constraint c = new Constraint(ve); + c.setGroup(g); + c.setParentGroup(pg); + tails.add(c); + } final TupleExpr left = leftJoin.getLeftArg(); @@ -571,7 +577,7 @@ } - private void collectTails(final List<Tail> tails, final Join join, + private void collectTails(final List<Op> tails, final Join join, final boolean rslj, final int g, final int pg) { final TupleExpr left = join.getLeftArg(); @@ -604,11 +610,17 @@ } - private void collectTails(final List<Tail> tails, final Filter filter, + private void collectTails(final List<Op> tails, final Filter filter, final boolean rslj, final int g, final int pg) { final ValueExpr ve = filter.getCondition(); // make a constraint, attach it to the rule + if (ve != null) { + final Constraint c = new Constraint(ve); + c.setGroup(g); + c.setParentGroup(pg); + tails.add(c); + } final TupleExpr arg = filter.getArg(); @@ -626,7 +638,7 @@ } - private void collectTails(final List<Tail> tails, final StatementPattern sp, + private void collectTails(final List<Op> tails, final StatementPattern sp, final boolean rslj, final int g, final int pg) { final Tail t = new Tail(sp); @@ -637,7 +649,19 @@ } - private static class Tail { + private static interface Op { + + void setGroup(int g); + + int getGroup(); + + void setParentGroup(int pg); + + 
int getParentGroup(); + + } + + private static class Tail implements Op { private StatementPattern sp; @@ -713,4 +737,54 @@ } + private static class Constraint implements Op { + + private ValueExpr ve; + + private int group, parent; + + public Constraint(ValueExpr ve) { + + this.ve = ve; + + } + + public void setGroup(final int group) { + + this.group = group; + + } + + public int getGroup() { + + return group; + + } + + public void setParentGroup(final int parent) { + + this.parent = parent; + + } + + public int getParentGroup() { + + return parent; + + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append("Constraint: group=").append(group); + sb.append(", parent=").append(parent); + sb.append(", filter=").append(ve); + + return sb.toString(); + + } + + } + }
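The new operators compose directly. A sketch of how a filter in the shape of "?a > ?dAge + 5" (the constraint built in the testMath case above) is expressed; the 'five' argument stands in for new Constant<IV>(_5.getIV()) from the test, and only types shown in this commit are assumed:

import org.openrdf.query.algebra.Compare.CompareOp;
import org.openrdf.query.algebra.MathExpr.MathOp;

import com.bigdata.bop.IConstant;
import com.bigdata.bop.IConstraint;
import com.bigdata.bop.IVariable;
import com.bigdata.bop.Var;
import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.internal.constraints.CompareBOp;
import com.bigdata.rdf.internal.constraints.MathBOp;

public class InlineMathExample {
    // Builds the constraint for the SPARQL filter "?a > ?dAge + 5".
    // 'five' must wrap an inline numeric IV, e.g. new Constant<IV>(_5.getIV()).
    static IConstraint gtPlusFive(final IConstant<IV> five) {
        final IVariable<IV> a = Var.var("a");
        final IVariable<IV> dAge = Var.var("dAge");
        // MathBOp evaluates "?dAge + 5" via IVUtility.numericalMath(...);
        // CompareBOp then applies GT, using the inline numerical comparison
        // when both operands are inline numerics.
        return new CompareBOp(a, new MathBOp(dAge, five, MathOp.PLUS), CompareOp.GT);
    }
}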
From: <btm...@us...> - 2011-01-05 18:32:05
Revision: 4058 http://bigdata.svn.sourceforge.net/bigdata/?rev=4058&view=rev Author: btmurphy Date: 2011-01-05 18:31:58 +0000 (Wed, 05 Jan 2011) Log Message: ----------- [branch dev-btm]: checkpoint - updates to com.bigdata.jini.quorum.QuorumPeerManager - changed how string value of peerAddress is retrieved from InetAddress.toString to InetAddress.getHostAddress to remove the forward slash prefix that toString returns; also added a number of additional public methods, where some are intended to keep this class in sync with the ZooKeeper client, and others may be of general use Modified Paths: -------------- branches/dev-btm/bigdata-jini/src/java/com/bigdata/jini/quorum/QuorumPeerManager.java Modified: branches/dev-btm/bigdata-jini/src/java/com/bigdata/jini/quorum/QuorumPeerManager.java =================================================================== --- branches/dev-btm/bigdata-jini/src/java/com/bigdata/jini/quorum/QuorumPeerManager.java 2011-01-05 15:50:20 UTC (rev 4057) +++ branches/dev-btm/bigdata-jini/src/java/com/bigdata/jini/quorum/QuorumPeerManager.java 2011-01-05 18:31:58 UTC (rev 4058) @@ -50,6 +50,7 @@ import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.SessionExpiredException; +import org.apache.zookeeper.Watcher; import org.apache.zookeeper.Watcher.Event.KeeperState; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Stat; @@ -57,6 +58,8 @@ import java.io.IOException; import java.net.InetAddress; import java.rmi.RemoteException; +import java.util.ArrayList; +import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -93,6 +96,8 @@ private LookupCache quorumServiceCache; private Map<UUID, String> hostPortMap = new ConcurrentHashMap<UUID, String>(); + private Map<UUID, QuorumPeerAttr> quorumPeerAttrMap = + new ConcurrentHashMap<UUID, QuorumPeerAttr>(); private volatile String connectString = null; private volatile ZooKeeper zkClient; @@ -147,11 +152,31 @@ // Wrapped methods from org.apache.zookeper.ZooKeeper client class + public long getSessionId() throws IOException { + checkTerminated(); + return getClient().getSessionId(); + } + + public byte[] getSessionPasswd() throws IOException { + checkTerminated(); + return getClient().getSessionPasswd(); + } + + public int getSessionTimeout() throws IOException { + checkTerminated(); + return getClient().getSessionTimeout(); + } + public void addAuthInfo(String scheme, byte[] auth) throws IOException { checkTerminated(); getClient().addAuthInfo(scheme, auth); } + public void register(Watcher watcher) throws IOException { + checkTerminated(); + getClient().register(watcher); + } + public void close() { if (terminated) return; if ( (zkClient != null) && (zkClient.getState().isAlive()) ) { @@ -411,6 +436,50 @@ return connectString; } + // Need to keep the addresses in order with their respective ports + public List<List<String>> getServerInfo() { + List<List<String>> retList = new ArrayList<List<String>>(); + Collection<QuorumPeerAttr> attrs = quorumPeerAttrMap.values(); + if ( attrs.isEmpty() ) { + logger.debug("no zookeeper servers discovered"); + return retList; + } + for (QuorumPeerAttr attr : attrs) { + String addr = (attr.address).getHostAddress(); + String peerPort = String.valueOf(attr.peerPort); + String electionPort = String.valueOf(attr.electionPort); + List<String> subList = new ArrayList<String>(); + subList.add(addr); + subList.add(peerPort); + subList.add(electionPort); +
retList.add(subList); + } + return retList; + } + + // Client ports should be the same for all zookeeper servers + public int getClientPort() { + Iterator<QuorumPeerAttr> itr = (quorumPeerAttrMap.values()).iterator(); + if ( !itr.hasNext() ) { + logger.debug("no zookeeper servers discovered [clientPort=-1]"); + return -1; + } + + int port0 = (itr.next()).clientPort; + + // client ports from each zookeeper server should be the same + boolean allEqual = true; + while( itr.hasNext() ) { + int port = (itr.next()).clientPort; + if (port != port0) allEqual = false; + } + if (!allEqual) { + logger.warn("not all zookeeper servers configured with same " + +"client port - "+quorumPeerAttrMap.values()); + } + return port0; + } + // Private methods private ZooKeeper getClient() throws IOException { @@ -528,10 +597,11 @@ if(logger.isDebugEnabled()) { logger.log(Level.DEBUG, "1 of "+ensembleSize+" quorum peer(s) " - +"DISCOVERED [addr="+peerAddr+", port=" - +clientPort+"]"); + +"DISCOVERED [addr="+peerAddr.getHostAddress() + +", port="+clientPort+"]"); } - hostPortMap.put(serviceUUID, peerAddr+":"+clientPort); + hostPortMap.put + (serviceUUID, peerAddr.getHostAddress()+":"+clientPort); // Build connectString when all expected peers found synchronized(syncObj) {
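The toString-to-getHostAddress change is self-contained enough to demonstrate in isolation (a standalone sketch; the address and the 2181 client port are hypothetical):

import java.net.InetAddress;
import java.net.UnknownHostException;

public class HostAddressExample {
    public static void main(String[] args) throws UnknownHostException {
        final InetAddress peerAddr = InetAddress.getByName("10.0.0.5");
        // toString() renders "hostname/literal-IP"; for a literal address
        // with no resolved hostname it starts with a bare slash: "/10.0.0.5"
        System.out.println(peerAddr.toString());
        // getHostAddress() yields just "10.0.0.5", which is what a
        // "host:port" connect string needs:
        System.out.println(peerAddr.getHostAddress() + ":" + 2181);
    }
}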
From: <tho...@us...> - 2011-01-05 15:50:26
Revision: 4057 http://bigdata.svn.sourceforge.net/bigdata/?rev=4057&view=rev Author: thompsonbry Date: 2011-01-05 15:50:20 +0000 (Wed, 05 Jan 2011) Log Message: ----------- Reduced the default term cache capacity from 50k to 5k. This restores the performance level of the trunk when using the new query engine with the chunked running query impl against the BSBM 100M and LUBM U50 benchmarks. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2011-01-05 14:02:07 UTC (rev 4056) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2011-01-05 15:50:20 UTC (rev 4057) @@ -591,7 +591,7 @@ String TERM_CACHE_CAPACITY = AbstractTripleStore.class.getName() + ".termCache.capacity"; - String DEFAULT_TERM_CACHE_CAPACITY = "50000"; + String DEFAULT_TERM_CACHE_CAPACITY = "5000";//"50000"; /** * The name of the class that will establish the pre-defined
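The old 50k capacity remains available per KB via the same option; a configuration sketch (the property name comes from the diff, while the value and the assumption that the KB is created from these Properties follow the usual bigdata pattern):

import java.util.Properties;

import com.bigdata.rdf.store.AbstractTripleStore;

public class TermCacheConfig {
    public static Properties withLargerTermCache() {
        final Properties p = new Properties();
        // Restore the previous 50k capacity for workloads that benefit from
        // a larger term cache, at the cost of additional heap:
        p.setProperty(AbstractTripleStore.Options.TERM_CACHE_CAPACITY, "50000");
        return p;
    }
}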
From: <tho...@us...> - 2011-01-05 14:02:14
Revision: 4056 http://bigdata.svn.sourceforge.net/bigdata/?rev=4056&view=rev Author: thompsonbry Date: 2011-01-05 14:02:07 +0000 (Wed, 05 Jan 2011) Log Message: ----------- Added the termCache capacity to the dump of interesting effective property values. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 2011-01-05 13:57:49 UTC (rev 4055) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 2011-01-05 14:02:07 UTC (rev 4056) @@ -386,6 +386,22 @@ + tripleStore.getMaxParallelSubqueries() + "\n"); /* + * And show some interesting effective properties for the KB, SPO + * relation, and lexicon relation. + */ + sb.append("-- Interesting KB effective properties --\n"); + + sb + .append(AbstractTripleStore.Options.TERM_CACHE_CAPACITY + + "=" + + tripleStore + .getLexiconRelation() + .getProperties() + .getProperty( + AbstractTripleStore.Options.TERM_CACHE_CAPACITY, + AbstractTripleStore.Options.DEFAULT_TERM_CACHE_CAPACITY) + "\n"); + + /* * And show several interesting properties with their effective * defaults. */
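Condensed, the dump uses the effective-value idiom: read the configured property and fall back to the compiled-in default. A sketch using only the names from the diff:

import com.bigdata.rdf.store.AbstractTripleStore;

public class EffectiveProps {
    // Returns the term cache capacity actually in effect for a KB:
    // the configured value if set, else the compiled-in default.
    static String effectiveTermCacheCapacity(final AbstractTripleStore tripleStore) {
        return tripleStore.getLexiconRelation().getProperties().getProperty(
                AbstractTripleStore.Options.TERM_CACHE_CAPACITY,
                AbstractTripleStore.Options.DEFAULT_TERM_CACHE_CAPACITY);
    }
}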
From: <tho...@us...> - 2011-01-05 13:57:57
Revision: 4055 http://bigdata.svn.sourceforge.net/bigdata/?rev=4055&view=rev Author: thompsonbry Date: 2011-01-05 13:57:49 +0000 (Wed, 05 Jan 2011) Log Message: ----------- Added explicit dump of interesting BOP and AbstractResource effective property values. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 2011-01-05 13:51:25 UTC (rev 4054) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 2011-01-05 13:57:49 UTC (rev 4055) @@ -73,6 +73,9 @@ import org.openrdf.sail.SailException; import com.bigdata.LRUNexus; +import com.bigdata.bop.BufferAnnotations; +import com.bigdata.bop.IPredicate; +import com.bigdata.bop.join.PipelineJoin; import com.bigdata.btree.IndexMetadata; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.IIndexManager; @@ -90,7 +93,6 @@ import com.bigdata.relation.AbstractResource; import com.bigdata.relation.RelationSchema; import com.bigdata.service.AbstractDistributedFederation; -import com.bigdata.service.AbstractFederation; import com.bigdata.service.IBigdataFederation; import com.bigdata.service.jini.JiniClient; import com.bigdata.sparse.ITPS; @@ -347,9 +349,6 @@ sb.append(BigdataSail.Options.STAR_JOINS + "=" + conn.getRepository().getSail().isStarJoins() + "\n"); - sb.append(AbstractResource.Options.MAX_PARALLEL_SUBQUERIES + "=" - + tripleStore.getMaxParallelSubqueries() + "\n"); - sb.append("-- All properties.--\n"); // get the triple store's properties from the global row store. @@ -362,6 +361,79 @@ sb.append(key + "=" + properties.get(key)+"\n"); } + /* + * And show some properties which can be inherited from + * AbstractResource. These have been mainly phased out in favor of + * BOP annotations, but there are a few places where they are still + * in use. + */ + + sb.append("-- Interesting AbstractResource effective properties --\n"); + + sb.append(AbstractResource.Options.CHUNK_CAPACITY + "=" + + tripleStore.getChunkCapacity() + "\n"); + + sb.append(AbstractResource.Options.CHUNK_OF_CHUNKS_CAPACITY + "=" + + tripleStore.getChunkOfChunksCapacity() + "\n"); + + sb.append(AbstractResource.Options.CHUNK_TIMEOUT + "=" + + tripleStore.getChunkTimeout() + "\n"); + + sb.append(AbstractResource.Options.FULLY_BUFFERED_READ_THRESHOLD + "=" + + tripleStore.getFullyBufferedReadThreshold() + "\n"); + + sb.append(AbstractResource.Options.MAX_PARALLEL_SUBQUERIES + "=" + + tripleStore.getMaxParallelSubqueries() + "\n"); + + /* + * And show several interesting properties with their effective + * defaults. 
+ */ + + sb.append("-- Interesting Effective BOP Annotations --\n"); + + sb.append(BufferAnnotations.CHUNK_CAPACITY + + "=" + + tripleStore.getProperties().getProperty( + BufferAnnotations.CHUNK_CAPACITY, + "" + BufferAnnotations.DEFAULT_CHUNK_CAPACITY) + + "\n"); + + sb + .append(BufferAnnotations.CHUNK_OF_CHUNKS_CAPACITY + + "=" + + tripleStore + .getProperties() + .getProperty( + BufferAnnotations.CHUNK_OF_CHUNKS_CAPACITY, + "" + + BufferAnnotations.DEFAULT_CHUNK_OF_CHUNKS_CAPACITY) + + "\n"); + + sb.append(BufferAnnotations.CHUNK_TIMEOUT + + "=" + + tripleStore.getProperties().getProperty( + BufferAnnotations.CHUNK_TIMEOUT, + "" + BufferAnnotations.DEFAULT_CHUNK_TIMEOUT) + + "\n"); + + sb.append(PipelineJoin.Annotations.MAX_PARALLEL + + "=" + + tripleStore.getProperties().getProperty( + PipelineJoin.Annotations.MAX_PARALLEL, + "" + PipelineJoin.Annotations.DEFAULT_MAX_PARALLEL) + "\n"); + + sb + .append(IPredicate.Annotations.FULLY_BUFFERED_READ_THRESHOLD + + "=" + + tripleStore + .getProperties() + .getProperty( + IPredicate.Annotations.FULLY_BUFFERED_READ_THRESHOLD, + "" + + IPredicate.Annotations.DEFAULT_FULLY_BUFFERED_READ_THRESHOLD) + + "\n"); + // sb.append(tripleStore.predicateUsage()); } catch (Throwable t) {
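Because every annotation in the dump resolves through the KB's Properties with a compiled-in fallback, the same names can in principle be used to tune them; a hypothetical tuning sketch (the values are illustrative, not recommendations, and assume the KB is created from these Properties):

import java.util.Properties;

import com.bigdata.bop.BufferAnnotations;
import com.bigdata.bop.IPredicate;
import com.bigdata.bop.join.PipelineJoin;

public class QueryBufferTuning {
    public static Properties tuned() {
        final Properties p = new Properties();
        // Chunk sizing for the operator buffers:
        p.setProperty(BufferAnnotations.CHUNK_CAPACITY, "1000");
        p.setProperty(BufferAnnotations.CHUNK_OF_CHUNKS_CAPACITY, "5");
        // Join parallelism and the fully-buffered access path threshold:
        p.setProperty(PipelineJoin.Annotations.MAX_PARALLEL, "5");
        p.setProperty(IPredicate.Annotations.FULLY_BUFFERED_READ_THRESHOLD, "100");
        return p;
    }
}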
From: <tho...@us...> - 2011-01-05 13:51:33
Revision: 4054 http://bigdata.svn.sourceforge.net/bigdata/?rev=4054&view=rev Author: thompsonbry Date: 2011-01-05 13:51:25 +0000 (Wed, 05 Jan 2011) Log Message: ----------- Reduced several array capacity constants of 10000 or more to 100 in order to reduce the heap churn during query. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BufferAnnotations.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/AbstractChunkedTupleIterator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/IChunkedIterator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsIterator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesPIterator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesPOIterator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesSPIterator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesSPOIterator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BufferAnnotations.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BufferAnnotations.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BufferAnnotations.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -51,8 +51,10 @@ /** * Default for {@link #CHUNK_OF_CHUNKS_CAPACITY} + * + * @todo Try smaller capacities in benchmarks */ - int DEFAULT_CHUNK_OF_CHUNKS_CAPACITY = 5;//trunk=1000 + int DEFAULT_CHUNK_OF_CHUNKS_CAPACITY = 5;//5;//trunk=1000 /** * Sets the capacity of the {@link IBuffer}[]s used to accumulate a chunk of Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -42,6 +42,7 @@ import com.bigdata.btree.filter.Advancer; import com.bigdata.btree.filter.TupleFilter; import com.bigdata.mdi.PartitionLocator; +import com.bigdata.rawstore.Bytes; import com.bigdata.relation.IRelation; import com.bigdata.relation.accesspath.AccessPath; import com.bigdata.relation.accesspath.ElementFilter; @@ -258,7 +259,7 @@ * @todo Experiment with this. It should probably be something close to * the branching factor, e.g., 100. 
*/ - int DEFAULT_FULLY_BUFFERED_READ_THRESHOLD = 100;//trunk=20*Bytes.kilobyte32 + int DEFAULT_FULLY_BUFFERED_READ_THRESHOLD = 100;//trunk=20*Bytes.kilobyte32; /** * Specify the {@link IRangeQuery} flags for the {@link IAccessPath} ( Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/AbstractChunkedTupleIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/AbstractChunkedTupleIterator.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/AbstractChunkedTupleIterator.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -248,7 +248,7 @@ */ protected int getDefaultCapacity() { - return 100000; + return 100;//1000;//100000; } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -284,7 +284,7 @@ * main purpose of the capacity is to reduce the contention for the * {@link ReadWriteLock}. */ - final static protected int DEFAULT_CAPACITY = 10000; + final static protected int DEFAULT_CAPACITY = 100;//10000; /** * Creates a view of an unisolated index that will enforce the concurrency Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -166,12 +166,12 @@ * The default capacity for the internal {@link Queue} on which elements (or * chunks of elements) are buffered. */ - public static transient final int DEFAULT_PRODUCER_QUEUE_CAPACITY = 5000; + public static transient final int DEFAULT_PRODUCER_QUEUE_CAPACITY = 100;//5000; /** * The default minimum chunk size for the chunk combiner. */ - public static transient final int DEFAULT_MINIMUM_CHUNK_SIZE = 10000; + public static transient final int DEFAULT_MINIMUM_CHUNK_SIZE = 100;//10000; /** * The default timeout in milliseconds during which chunks of elements may Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/IChunkedIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/IChunkedIterator.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/IChunkedIterator.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -61,7 +61,7 @@ * * FIXME This is way too large. */ - int DEFAULT_CHUNK_SIZE = 10000; + int DEFAULT_CHUNK_SIZE = 100;//00; /** * The next element available from the iterator. 
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsIterator.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsIterator.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -27,6 +27,8 @@ protected IV sameAs; + final int chunkSize = 100;//10000; + protected IChunkedOrderedIterator<ISPO> src; public BackchainOwlSameAsIterator(IChunkedOrderedIterator<ISPO> src, Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesPIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesPIterator.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesPIterator.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -148,7 +148,7 @@ // use a buffer so that we can do a more efficient batch contains // to filter out existing statements - int chunkSize = 10000; +// int chunkSize = 10000; SPO[] spos = new SPO[chunkSize]; int numSPOs = 0; // create a new link between {s,? sameAs s} X {o,? sameAs o} tuples @@ -199,7 +199,6 @@ } public ISPO[] nextChunk() { - final int chunkSize = 10000; ISPO[] s = new ISPO[chunkSize]; int n = 0; while (hasNext() && n < chunkSize) { Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesPOIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesPOIterator.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesPOIterator.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -100,7 +100,7 @@ // which might be present in the source iterator already // use a buffer so that we can do a more efficient batch contains // to filter out existing statements - int chunkSize = 10000; +// int chunkSize = 10000; SPO[] spos = new SPO[chunkSize]; int numSPOs = 0; // get all of o's sames @@ -112,7 +112,7 @@ db.getAccessPath(null, p, same).iterator(); while (reversePropsIt.hasNext()) { final ISPO reverseProp = reversePropsIt.next(); - // do not add ( s sameAs s ) inferences + // do not add ( s sameAs s ) inferences if (IVUtility.equals(reverseProp.p(), sameAs) && IVUtility.equals(reverseProp.s(), o)) { continue; @@ -229,7 +229,7 @@ // ignore sameAs properties // use a buffer so that we can do a more efficient batch contains // to filter out existing statements - int chunkSize = 10000; +// int chunkSize = 10000; final ISPO[] spos = new ISPO[chunkSize]; int numSPOs = 0; // get all of s's sames @@ -273,7 +273,6 @@ } public ISPO[] nextChunk() { - final int chunkSize = 10000; ISPO[] s = new ISPO[chunkSize]; int n = 0; while (hasNext() && n < chunkSize) { Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesSPIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesSPIterator.java 2011-01-05 13:49:15 UTC (rev 4053) +++ 
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesSPIterator.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -107,7 +107,7 @@ // which might be present in the source iterator already // use a buffer so that we can do a more efficient batch contains // to filter out existing statements - int chunkSize = 10000; +// int chunkSize = 10000; SPO[] spos = new SPO[chunkSize]; int numSPOs = 0; // get all of s's sames @@ -238,7 +238,7 @@ // ignore sameAs properties // use a buffer so that we can do a more efficient batch contains // to filter out existing statements - int chunkSize = 10000; +// int chunkSize = 10000; ISPO[] spos = new ISPO[chunkSize]; int numSPOs = 0; // get all of o's sames @@ -282,7 +282,7 @@ } public ISPO[] nextChunk() { - final int chunkSize = 10000; +// final int chunkSize = 10000; ISPO[] s = new ISPO[chunkSize]; int n = 0; while (hasNext() && n < chunkSize) { Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesSPOIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesSPOIterator.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainOwlSameAsPropertiesSPOIterator.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -108,7 +108,7 @@ // all of which might be present in the source iterator already // use a buffer so that we can do a more efficient batch contains // to filter out existing statements - int chunkSize = 10000; +// int chunkSize = 10000; SPO[] spos = new SPO[chunkSize]; int numSPOs = 0; // collect up the links between {s,? sameAs s} X {o,? sameAs o} @@ -196,7 +196,7 @@ } public ISPO[] nextChunk() { - final int chunkSize = 10000; +// final int chunkSize = 10000; ISPO[] s = new ISPO[chunkSize]; int n = 0; while (hasNext() && n < chunkSize) { Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java 2011-01-05 13:49:15 UTC (rev 4053) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java 2011-01-05 13:51:25 UTC (rev 4054) @@ -20,7 +20,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ + */ /* * Created on Oct 30, 2007 */ @@ -69,973 +69,984 @@ * * @see InferenceEngine * @see InferenceEngine.Options - * + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public class BackchainTypeResourceIterator implements IChunkedOrderedIterator<ISPO> { + * @version $Id: BackchainTypeResourceIterator.java 3687 2010-09-29 22:50:32Z + * mrpersonick $ + */ +public class BackchainTypeResourceIterator implements + IChunkedOrderedIterator<ISPO> { - protected static final Logger log = Logger.getLogger(BackchainTypeResourceIterator.class); - - private final IChunkedOrderedIterator<ISPO> _src; - private final Iterator<ISPO> src; -// private final long s; -// private final AbstractTripleStore db; - private final IV rdfType, rdfsResource; - private final IKeyOrder<ISPO> keyOrder; + protected static final Logger log = Logger + 
.getLogger(BackchainTypeResourceIterator.class); - /** - * The subject(s) whose (s rdf:type rdfs:Resource) entailments will be - * visited. - */ - private PushbackIterator<IV> resourceIds; - - /** - * An iterator reading on the {@link SPOKeyOrder#POS} index. The predicate - * is bound to <code>rdf:type</code> and the object is bound to - * <code>rdfs:Resource</code>. If the subject was given to the ctor, then - * it will also be bound. The iterator visits the term identifier for the - * <em>subject</em> position. - */ - private PushbackIterator<IV> posItr; - - private boolean sourceExhausted = false; - - private boolean open = true; + private final IChunkedOrderedIterator<ISPO> _src; + private final Iterator<ISPO> src; + // private final long s; + // private final AbstractTripleStore db; + private final IV rdfType, rdfsResource; + private final IKeyOrder<ISPO> keyOrder; - /** - * This is set each time by {@link #nextChunk()} and inspected by - * {@link #nextChunk(IKeyOrder)} in order to decide whether the chunk needs - * to be sorted. - */ - private IKeyOrder<ISPO> chunkKeyOrder = null; + private final int chunkSize = 100;//10000; - /** - * The last {@link ISPO} visited by {@link #next()}. - */ - private ISPO current = null; + /** + * The subject(s) whose (s rdf:type rdfs:Resource) entailments will be + * visited. + */ + private PushbackIterator<IV> resourceIds; - /** - * Returns a suitably configured {@link BackchainTypeResourceIterator} -or- - * <i>src</i> iff the <i>accessPath</i> does not require the - * materialization of <code>(x rdf:type rdfs:Resource)</code> entailments. - * - * @param _src - * The source iterator. {@link #nextChunk()} will sort statements - * into the {@link IKeyOrder} reported by this iterator (as long - * as the {@link IKeyOrder} is non-<code>null</code>). - * @param accessPath - * The {@link IAccessPath} from which the <i>src</i> iterator - * was derived. Note that <i>src</i> is NOT necessarily - * equivalent to {@link IAccessPath#iterator()} since it MAY have - * been layered already to backchain other entailments, e.g., - * <code>owl:sameAs</code>. - * @param db - * The database from which we will read the distinct subject - * identifiers from its {@link SPORelation}. This parameter is - * used iff this is an all unbound triple pattern. - * @param rdfType - * The term identifier that corresponds to rdf:Type for the - * database. - * @param rdfsResource - * The term identifier that corresponds to rdf:Resource for the - * database. - * - * @return The backchain iterator -or- the <i>src</i> iterator iff the - * <i>accessPath</i> does not require the materialization of - * <code>(x rdf:type rdfs:Resource)</code> entailments. - */ - @SuppressWarnings("unchecked") - static public IChunkedOrderedIterator<ISPO> newInstance( - final IChunkedOrderedIterator<ISPO> _src, - final IAccessPath<ISPO> accessPath, final AbstractTripleStore db, - final IV rdfType, final IV rdfsResource) { - - if (accessPath == null) - throw new IllegalArgumentException(); - -// final SPO spo = new SPO(accessPath.getPredicate()); - final IPredicate<ISPO> pred = accessPath.getPredicate(); - final IV s = getTerm(pred, 0); - final IV p = getTerm(pred, 1); - final IV o = getTerm(pred, 2); + /** + * An iterator reading on the {@link SPOKeyOrder#POS} index. The predicate + * is bound to <code>rdf:type</code> and the object is bound to + * <code>rdfs:Resource</code>. If the subject was given to the ctor, then it + * will also be bound. 
The iterator visits the term identifier for the + * <em>subject</em> position. + */ + private PushbackIterator<IV> posItr; - if (((o == null || o.equals(rdfsResource)) && - (p == null || p.equals(rdfType))) == false) { - - /* - * Backchain will not generate any statements. - */ + private boolean sourceExhausted = false; - return _src; - - } - - if (_src == null) - throw new IllegalArgumentException(); - - if (db == null) - throw new IllegalArgumentException(); - - /* - * The subject(s) whose (s rdf:type rdfs:Resource) entailments will be - * visited. - */ - final PushbackIterator<IV> resourceIds; - - /* - * An iterator reading on the {@link SPOKeyOrder#POS} index. The - * predicate is bound to <code>rdf:type</code> and the object is bound - * to <code>rdfs:Resource</code>. If the subject was given to the - * ctor, then it will also be bound. The iterator visits the term - * identifier for the <em>subject</em> position. - */ - final PushbackIterator<IV> posItr; + private boolean open = true; - if (s == null) { + /** + * This is set each time by {@link #nextChunk()} and inspected by + * {@link #nextChunk(IKeyOrder)} in order to decide whether the chunk needs + * to be sorted. + */ + private IKeyOrder<ISPO> chunkKeyOrder = null; - /* - * Backchain will generate one statement for each distinct subject - * or object in the store. - * - * @todo This is Ok as long as you are forward chaining all of the - * rules that put a predicate or an object into the subject position - * since it will then have all resources. If you backward chain some - * of those rules, e.g., rdf1, then you MUST change this to read on - * the ids index and skip anything that is marked as a literal using - * the low bit of the term identifier but you will overgenerate for - * resources that are no longer in use by the KB (you could filter - * for that). - */ + /** + * The last {@link ISPO} visited by {@link #next()}. + */ + private ISPO current = null; -// resourceIds = db.getSPORelation().distinctTermScan(SPOKeyOrder.SPO); - - resourceIds = new PushbackIterator<IV>(new MergedOrderedIterator(// - db.getSPORelation().distinctTermScan(SPOKeyOrder.SPO), // - db.getSPORelation().distinctTermScan(SPOKeyOrder.OSP, - new ITermIVFilter() { - private static final long serialVersionUID = 1L; - public boolean isValid(IV iv) { - // filter out literals from the OSP scan. - return !iv.isLiteral(); - } - }))); + /** + * Returns a suitably configured {@link BackchainTypeResourceIterator} -or- + * <i>src</i> iff the <i>accessPath</i> does not require the materialization + * of <code>(x rdf:type rdfs:Resource)</code> entailments. + * + * @param _src + * The source iterator. {@link #nextChunk()} will sort statements + * into the {@link IKeyOrder} reported by this iterator (as long + * as the {@link IKeyOrder} is non-<code>null</code>). + * @param accessPath + * The {@link IAccessPath} from which the <i>src</i> iterator was + * derived. Note that <i>src</i> is NOT necessarily equivalent to + * {@link IAccessPath#iterator()} since it MAY have been layered + * already to backchain other entailments, e.g., + * <code>owl:sameAs</code>. + * @param db + * The database from which we will read the distinct subject + * identifiers from its {@link SPORelation}. This parameter is + * used iff this is an all unbound triple pattern. + * @param rdfType + * The term identifier that corresponds to rdf:Type for the + * database. + * @param rdfsResource + * The term identifier that corresponds to rdf:Resource for the + * database. 
+ * + * @return The backchain iterator -or- the <i>src</i> iterator iff the + * <i>accessPath</i> does not require the materialization of + * <code>(x rdf:type rdfs:Resource)</code> entailments. + */ + @SuppressWarnings("unchecked") + static public IChunkedOrderedIterator<ISPO> newInstance( + final IChunkedOrderedIterator<ISPO> _src, + final IAccessPath<ISPO> accessPath, final AbstractTripleStore db, + final IV rdfType, final IV rdfsResource) { - /* - * Reading (? rdf:Type rdfs:Resource) using the POS index. - */ + if (accessPath == null) + throw new IllegalArgumentException(); - posItr = new PushbackIterator<IV>(new Striterator(db.getAccessPath( - null, rdfType, rdfsResource, - ExplicitSPOFilter.INSTANCE).iterator()) - .addFilter(new Resolver() { - private static final long serialVersionUID = 1L; - @Override - protected Object resolve(Object obj) { - return ((SPO) obj).s; - } - })); + // final SPO spo = new SPO(accessPath.getPredicate()); + final IPredicate<ISPO> pred = accessPath.getPredicate(); + final IV s = getTerm(pred, 0); + final IV p = getTerm(pred, 1); + final IV o = getTerm(pred, 2); - } else { + if (((o == null || o.equals(rdfsResource)) && (p == null || p + .equals(rdfType))) == false) { - /* - * Backchain will generate exactly one statement: (s rdf:type - * rdfs:Resource). - */ -/* - resourceIds = new PushbackIterator<Long>( - new ClosableSingleItemIterator<Long>(spo.s)); -*/ - /* - * Reading a single point (s type resource), so this will actually - * use the SPO index. - */ -/* - posItr = new PushbackIterator<Long>(new Striterator(db - .getAccessPath(spo.s, rdfType, rdfsResource, - ExplicitSPOFilter.INSTANCE).iterator()) - .addFilter(new Resolver() { - private static final long serialVersionUID = 1L; - @Override - protected Object resolve(Object obj) { - return Long.valueOf(((SPO) obj).s); - } - })); -*/ - return new BackchainSTypeResourceIterator - ( _src, accessPath, db, rdfType, rdfsResource - ); + /* + * Backchain will not generate any statements. + */ - } - - /* - * filters out (x rdf:Type rdfs:Resource) in case it is explicit in the - * db so that we do not generate duplicates for explicit type resource - * statement. - */ - final Iterator<ISPO> src = new Striterator(_src).addFilter(new Filter(){ + return _src; - private static final long serialVersionUID = 1L; + } - public boolean isValid(Object arg0) { + if (_src == null) + throw new IllegalArgumentException(); - final SPO o = (SPO) arg0; + if (db == null) + throw new IllegalArgumentException(); - if (o.p.equals(rdfType) && o.o.equals(rdfsResource)) { - - return false; - - } - - return true; - - }}); - - return new BackchainTypeResourceIterator(_src, src, resourceIds, - posItr, rdfType, rdfsResource); - - } - - private static IV getTerm(final IPredicate<ISPO> pred, final int pos) { - - final IVariableOrConstant<IV> term = pred.get(pos); - - return term == null || term.isVar() ? null : term.get(); - - } - - /** - * Create an iterator that will visit all statements in the source iterator - * and also backchain any entailments of the form (x rdf:type rdfs:Resource) - * which are valid for the given triple pattern. - * - * @param src - * The source iterator. {@link #nextChunk()} will sort statements - * into the {@link IKeyOrder} reported by this iterator (as long - * as the {@link IKeyOrder} is non-<code>null</code>). - * @param db - * The database from which we will read the distinct subject - * identifiers (iff this is an all unbound triple pattern). 
- * @param rdfType - * The term identifier that corresponds to rdf:Type for the - * database. - * @param rdfsResource - * The term identifier that corresponds to rdf:Resource for the - * database. - * - * @see #newInstance(IChunkedOrderedIterator, IAccessPath, - * AbstractTripleStore, long, long) - */ - @SuppressWarnings({ "unchecked", "serial" }) - private BackchainTypeResourceIterator(IChunkedOrderedIterator<ISPO> _src,// - Iterator<ISPO> src,// - PushbackIterator<IV> resourceIds,// - PushbackIterator<IV> posItr,// - final IV rdfType,// - final IV rdfsResource// - ) { - - // the raw source - we pass close() through to this. - this._src = _src; - - this.keyOrder = _src.getKeyOrder(); // MAY be null. - - // the source with (x type resource) filtered out. - this.src = src; - - // - this.resourceIds = resourceIds; - - this.posItr = posItr; - - this.rdfType = rdfType; - - this.rdfsResource = rdfsResource; - - } + /* + * The subject(s) whose (s rdf:type rdfs:Resource) entailments will be + * visited. + */ + final PushbackIterator<IV> resourceIds; - public IKeyOrder<ISPO> getKeyOrder() { + /* + * An iterator reading on the {@link SPOKeyOrder#POS} index. The + * predicate is bound to <code>rdf:type</code> and the object is bound + * to <code>rdfs:Resource</code>. If the subject was given to the ctor, + * then it will also be bound. The iterator visits the term identifier + * for the <em>subject</em> position. + */ + final PushbackIterator<IV> posItr; - return keyOrder; - - } + if (s == null) { - public void close() { + /* + * Backchain will generate one statement for each distinct subject + * or object in the store. + * + * @todo This is Ok as long as you are forward chaining all of the + * rules that put a predicate or an object into the subject position + * since it will then have all resources. If you backward chain some + * of those rules, e.g., rdf1, then you MUST change this to read on + * the ids index and skip anything that is marked as a literal using + * the low bit of the term identifier but you will overgenerate for + * resources that are no longer in use by the KB (you could filter + * for that). + */ - if(!open) return; - - // release any resources here. - - open = false; + // resourceIds = + // db.getSPORelation().distinctTermScan(SPOKeyOrder.SPO); - _src.close(); + resourceIds = new PushbackIterator<IV>(new MergedOrderedIterator(// + db.getSPORelation().distinctTermScan(SPOKeyOrder.SPO), // + db.getSPORelation().distinctTermScan(SPOKeyOrder.OSP, + new ITermIVFilter() { + private static final long serialVersionUID = 1L; - resourceIds.close(); - - resourceIds = null; - - if (posItr != null) { + public boolean isValid(IV iv) { + // filter out literals from the OSP scan. + return !iv.isLiteral(); + } + }))); - posItr.close(); - - } - - } + /* + * Reading (? rdf:Type rdfs:Resource) using the POS index. + */ - public boolean hasNext() { - - if (!open) { + posItr = new PushbackIterator<IV>(new Striterator(db.getAccessPath( + null, rdfType, rdfsResource, ExplicitSPOFilter.INSTANCE) + .iterator()).addFilter(new Resolver() { + private static final long serialVersionUID = 1L; - // the iterator has been closed. - - return false; - - } + @Override + protected Object resolve(Object obj) { + return ((SPO) obj).s; + } + })); - if (!sourceExhausted) { + } else { - if (src.hasNext()) { + /* + * Backchain will generate exactly one statement: (s rdf:type + * rdfs:Resource). 
+ */ + /* + * resourceIds = new PushbackIterator<Long>( new + * ClosableSingleItemIterator<Long>(spo.s)); + */ + /* + * Reading a single point (s type resource), so this will actually + * use the SPO index. + */ + /* + * posItr = new PushbackIterator<Long>(new Striterator(db + * .getAccessPath(spo.s, rdfType, rdfsResource, + * ExplicitSPOFilter.INSTANCE).iterator()) .addFilter(new Resolver() + * { private static final long serialVersionUID = 1L; + * + * @Override protected Object resolve(Object obj) { return + * Long.valueOf(((SPO) obj).s); } })); + */ + return new BackchainSTypeResourceIterator(_src, accessPath, db, + rdfType, rdfsResource); - // still consuming the source iterator. + } - return true; + /* + * filters out (x rdf:Type rdfs:Resource) in case it is explicit in the + * db so that we do not generate duplicates for explicit type resource + * statement. + */ + final Iterator<ISPO> src = new Striterator(_src) + .addFilter(new Filter() { - } + private static final long serialVersionUID = 1L; - // the source iterator is now exhausted. + public boolean isValid(Object arg0) { - sourceExhausted = true; + final SPO o = (SPO) arg0; - _src.close(); + if (o.p.equals(rdfType) && o.o.equals(rdfsResource)) { - } + return false; - if (resourceIds.hasNext()) { + } - // still consuming the subjects iterator. - - return true; - - } - - // the subjects iterator is also exhausted so we are done. - - return false; - - } + return true; - /** - * Visits all {@link SPO}s visited by the source iterator and then begins - * to backchain ( x rdf:type: rdfs:Resource ) statements. - * <p> - * The "backchain" scans two iterators: an {@link IChunkedOrderedIterator} - * on <code>( ? rdf:type - * rdfs:Resource )</code> that reads on the database - * (this tells us whether we have an explicit - * <code>(x rdf:type rdfs:Resource)</code> in the database for a given - * subject) and iterator that reads on the term identifiers for the distinct - * resources in the database (this bounds the #of backchained statements - * that we will emit). - * <p> - * For each value visited by the {@link #resourceIds} iterator we examine - * the statement iterator. If the next value that would be visited by the - * statement iterator is an explicit statement for the current subject, then - * we emit the explicit statement. Otherwise we emit an inferred statement. - */ - public ISPO next() { + } + }); - if (!hasNext()) { + return new BackchainTypeResourceIterator(_src, src, resourceIds, + posItr, rdfType, rdfsResource); - throw new NoSuchElementException(); - - } + } - if (src.hasNext()) { + private static IV getTerm(final IPredicate<ISPO> pred, final int pos) { - return current = src.next(); - - } else if(resourceIds.hasNext()) { + final IVariableOrConstant<IV> term = pred.get(pos); - /* - * Examine resourceIds and posItr. - */ - - // resourceIds is the source for _inferences_ - final IV s1 = resourceIds.next(); - - if(posItr.hasNext()) { - - // posItr is the source for _explicit_ statements. - final IV s2 = posItr.next(); - - final int cmp = s1.compareTo(s2); - - if (cmp < 0) { + return term == null || term.isVar() ? null : term.get(); - /* - * Consuming from [resourceIds] (the term identifier ordered - * LT the next term identifier from [posItr]). - * - * There is NOT an explicit statement from [posItr], so emit - * as an inference and pushback on [posItr]. - */ - - current = new SPO(s1, rdfType, rdfsResource, - StatementEnum.Inferred); + } - posItr.pushback(); - - } else { - - /* - * Consuming from [posItr]. 
- * - * There is an explicit statement for the current term - * identifer from [resourceIds]. - */ - - if (cmp != 0) { - - /* - * Since [resourceIds] and [posItr] are NOT visiting the - * same term identifier, we pushback on [resourceIds]. - * - * Note: When they DO visit the same term identifier - * then we only emit the explicit statement and we - * consume (rather than pushback) from [resourceIds]. - */ - - resourceIds.pushback(); - - } - - current = new SPO(s2, rdfType, rdfsResource, - StatementEnum.Explicit); + /** + * Create an iterator that will visit all statements in the source iterator + * and also backchain any entailments of the form (x rdf:type rdfs:Resource) + * which are valid for the given triple pattern. + * + * @param src + * The source iterator. {@link #nextChunk()} will sort statements + * into the {@link IKeyOrder} reported by this iterator (as long + * as the {@link IKeyOrder} is non-<code>null</code>). + * @param db + * The database from which we will read the distinct subject + * identifiers (iff this is an all unbound triple pattern). + * @param rdfType + * The term identifier that corresponds to rdf:Type for the + * database. + * @param rdfsResource + * The term identifier that corresponds to rdf:Resource for the + * database. + * + * @see #newInstance(IChunkedOrderedIterator, IAccessPath, + * AbstractTripleStore, long, long) + */ + @SuppressWarnings( { "unchecked", "serial" }) + private BackchainTypeResourceIterator(IChunkedOrderedIterator<ISPO> _src,// + Iterator<ISPO> src,// + PushbackIterator<IV> resourceIds,// + PushbackIterator<IV> posItr,// + final IV rdfType,// + final IV rdfsResource// + ) { - } - - } else { - - /* - * [posItr] is exhausted so just emit inferences based on - * [resourceIds]. - */ - - current = new SPO(s1, rdfType, rdfsResource, - StatementEnum.Inferred); - - } + // the raw source - we pass close() through to this. + this._src = _src; - return current; + this.keyOrder = _src.getKeyOrder(); // MAY be null. - } else { - - /* - * Finish off the [posItr]. Anything from this source is an explicit (? - * type resource) statement. - */ - - assert posItr.hasNext(); - - return new SPO(posItr.next(), rdfType, rdfsResource, - StatementEnum.Explicit); - - } - - } + // the source with (x type resource) filtered out. + this.src = src; - /** - * Note: This method preserves the {@link IKeyOrder} of the source iterator - * iff it is reported by {@link #getKeyOrder()}. Otherwise chunks read from - * the source iterator will be in whatever order that iterator is using - * while chunks containing backchained entailments will be in - * {@link SPOKeyOrder#POS} order. - * <p> - * Note: In order to ensure that a consistent ordering is always used within - * a chunk the backchained entailments will always begin on a chunk - * boundary. - */ - public ISPO[] nextChunk() { + // + this.resourceIds = resourceIds; - final int chunkSize = 10000; - - if (!hasNext()) - throw new NoSuchElementException(); - - if(!sourceExhausted) { - - /* - * Return a chunk from the source iterator. - * - * Note: The chunk will be in the order used by the source iterator. - * If the source iterator does not report that order then - * [chunkKeyOrder] will be null. - */ - - chunkKeyOrder = keyOrder; + this.posItr = posItr; - ISPO[] s = new ISPO[chunkSize]; + this.rdfType = rdfType; - int n = 0; - - while(src.hasNext() && n < chunkSize ) { - - s[n++] = src.next(); - - } - - ISPO[] stmts = new ISPO[n]; - - // copy so that stmts[] is dense. 
- System.arraycopy(s, 0, stmts, 0, n); - - return stmts; - - } + this.rdfsResource = rdfsResource; - /* - * Create a "chunk" of entailments. - * - * Note: This chunk will be in natural POS order since that is the index - * that we scan to decide whether or not there was an explicit ( x - * rdf:type rdfs:Resource ) while we consume the [subjects] in termId - * order. - */ - - IV[] s = new IV[chunkSize]; - - int n = 0; - - while(resourceIds.hasNext() && n < chunkSize ) { - - s[n++] = resourceIds.next(); - - } - - SPO[] stmts = new SPO[n]; - - for(int i=0; i<n; i++) { - - stmts[i] = new SPO(s[i], rdfType, rdfsResource, - StatementEnum.Inferred); - - } - - if (keyOrder != null && keyOrder != SPOKeyOrder.POS) { + } - /* - * Sort into the same order as the source iterator. - * - * Note: We have to sort explicitly since we are scanning the POS - * index - */ + public IKeyOrder<ISPO> getKeyOrder() { - Arrays.sort(stmts, 0, stmts.length, keyOrder.getComparator()); + return keyOrder; - } + } - /* - * The chunk will be in POS order since that is how we are scanning the - * indices. - */ - - chunkKeyOrder = SPOKeyOrder.POS; - - return stmts; - - } + public void close() { - public ISPO[] nextChunk(IKeyOrder<ISPO> keyOrder) { - - if (keyOrder == null) - throw new IllegalArgumentException(); + if (!open) + return; - final ISPO[] stmts = nextChunk(); - - if (chunkKeyOrder != keyOrder) { + // release any resources here. - // sort into the required order. + open = false; - Arrays.sort(stmts, 0, stmts.length, keyOrder.getComparator()); + _src.close(); - } + resourceIds.close(); - return stmts; - - } + resourceIds = null; - /** - * Note: You can not "remove" the backchained entailments. If the last - * statement visited by {@link #next()} is "explicit" then the request is - * delegated to the source iterator. - */ - public void remove() { + if (posItr != null) { - if (!open) - throw new IllegalStateException(); - - if (current == null) - throw new IllegalStateException(); - - if(current.isExplicit()) { - - /* - * Delegate the request to the source iterator. - */ - - src.remove(); - - } - - current = null; - - } - - /** - * Reads on two iterators visiting elements in some natural order and visits - * their order preserving merge (no duplicates). - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * @param <T> - */ - private static class MergedOrderedIterator<T extends Comparable<T>> - implements IChunkedIterator<T> { - - private final IChunkedIterator<T> src1; - private final IChunkedIterator<T> src2; - - public MergedOrderedIterator(IChunkedIterator<T> src1, - IChunkedIterator<T> src2) { + posItr.close(); - this.src1 = src1; - - this.src2 = src2; - - } - - public void close() { - - src1.close(); - - src2.close(); - - } + } - /** - * Note: Not implemented since not used above and this class is private. - */ - public T[] nextChunk() { - throw new UnsupportedOperationException(); - } + } - public boolean hasNext() { + public boolean hasNext() { - return tmp1 != null || tmp2 != null || src1.hasNext() - || src2.hasNext(); - - } - - private T tmp1; - private T tmp2; - - public T next() { + if (!open) { - if(!hasNext()) throw new NoSuchElementException(); - - if (tmp1 == null && src1.hasNext()) { + // the iterator has been closed. - tmp1 = src1.next(); + return false; - } - - if (tmp2 == null && src2.hasNext()) { + } - tmp2 = src2.next(); + if (!sourceExhausted) { - } - - if (tmp1 == null) { + if (src.hasNext()) { - // src1 is exhausted so deliver from src2. 
- final T tmp = tmp2; + // still consuming the source iterator. - tmp2 = null; + return true; - return tmp; + } - } - - if (tmp2 == null) { + // the source iterator is now exhausted. - // src2 is exhausted so deliver from src1. - final T tmp = tmp1; + sourceExhausted = true; - tmp1 = null; + _src.close(); - return tmp; + } - } + if (resourceIds.hasNext()) { - final int cmp = tmp1.compareTo(tmp2); + // still consuming the subjects iterator. - if (cmp == 0) { + return true; - final T tmp = tmp1; + } - tmp1 = tmp2 = null; + // the subjects iterator is also exhausted so we are done. - return tmp; + return false; - } else if (cmp < 0) { + } - final T tmp = tmp1; + /** + * Visits all {@link SPO}s visited by the source iterator and then begins to + * backchain ( x rdf:type: rdfs:Resource ) statements. + * <p> + * The "backchain" scans two iterators: an {@link IChunkedOrderedIterator} + * on <code>( ? rdf:type + * rdfs:Resource )</code> that reads on the database (this tells us whether + * we have an explicit <code>(x rdf:type rdfs:Resource)</code> in the + * database for a given subject) and iterator that reads on the term + * identifiers for the distinct resources in the database (this bounds the + * #of backchained statements that we will emit). + * <p> + * For each value visited by the {@link #resourceIds} iterator we examine + * the statement iterator. If the next value that would be visited by the + * statement iterator is an explicit statement for the current subject, then + * we emit the explicit statement. Otherwise we emit an inferred statement. + */ + public ISPO next() { - tmp1 = null; + if (!hasNext()) { - return tmp; + throw new NoSuchElementException(); - } else { + } - final T tmp = tmp2; + if (src.hasNext()) { - tmp2 = null; + return current = src.next(); - return tmp; + } else if (resourceIds.hasNext()) { - } - - } + /* + * Examine resourceIds and posItr. + */ - public void remove() { + // resourceIds is the source for _inferences_ + final IV s1 = resourceIds.next(); - throw new UnsupportedOperationException(); - - } + if (posItr.hasNext()) { - } + // posItr is the source for _explicit_ statements. + final IV s2 = posItr.next(); - /** - * Filterator style construct that allows push back of a single visited - * element. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * @param <E> - */ - public static class PushbackFilter<E> extends FilterBase { + final int cmp = s1.compareTo(s2); - /** - * - */ - private static final long serialVersionUID = -8010263934867149205L; + if (cmp < 0) { - @SuppressWarnings("unchecked") - public PushbackIterator<E> filterOnce(Iterator src, Object context) { + /* + * Consuming from [resourceIds] (the term identifier ordered + * LT the next term identifier from [posItr]). + * + * There is NOT an explicit statement from [posItr], so emit + * as an inference and pushback on [posItr]. + */ - return new PushbackIterator<E>((Iterator<E>) src); + current = new SPO(s1, rdfType, rdfsResource, + StatementEnum.Inferred); - } + posItr.pushback(); - } + } else { - /** - * Implementation class for {@link PushbackFilter}. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * @param <E> - */ - public static class PushbackIterator<E> implements Iterator<E>, - ICloseableIterator<E> { + /* + * Consuming from [posItr]. + * + * There is an explicit statement for the current term + * identifer from [resourceIds]. 
+ */ - private final Iterator<E> src; + if (cmp != 0) { - /** - * The most recent element visited by the iterator. - */ - private E current; - - /** - * When non-<code>null</code>, this element was pushed back and - * is the next element to be visited. - */ - private E buffer; + /* + * Since [resourceIds] and [posItr] are NOT visiting the + * same term identifier, we pushback on [resourceIds]. + * + * Note: When they DO visit the same term identifier + * then we only emit the explicit statement and we + * consume (rather than pushback) from [resourceIds]. + */ - public PushbackIterator(final Iterator<E> src) { + resourceIds.pushback(); - if (src == null) - throw new IllegalArgumentException(); + } - this.src = src; + current = new SPO(s2, rdfType, rdfsResource, + StatementEnum.Explicit); - } + } - public boolean hasNext() { + } else { - return buffer != null || src.hasNext(); + /* + * [posItr] is exhausted so just emit inferences based on + * [resourceIds]. + */ - } + current = new SPO(s1, rdfType, rdfsResource, + StatementEnum.Inferred); - public E next() { + } - if (!hasNext()) - throw new NoSuchElementException(); + return current; - final E tmp; + } else { - if (buffer != null) { + /* + * Finish off the [posItr]. Anything from this source is an explicit + * (? type resource) statement. + */ - tmp = buffer; + assert posItr.hasNext(); - buffer = null; + return new SPO(posItr.next(), rdfType, rdfsResource, + StatementEnum.Explicit); - } else { + } - tmp = src.next(); + } - } + /** + * Note: This method preserves the {@link IKeyOrder} of the source iterator + * iff it is reported by {@link #getKeyOrder()}. Otherwise chunks read from + * the source iterator will be in whatever order that iterator is using + * while chunks containing backchained entailments will be in + * {@link SPOKeyOrder#POS} order. + * <p> + * Note: In order to ensure that a consistent ordering is always used within + * a chunk the backchained entailments will always begin on a chunk + * boundary. + */ + public ISPO[] nextChunk() { - current = tmp; - - return tmp; + if (!hasNext()) + throw new NoSuchElementException(); - } + if (!sourceExhausted) { - /** - * Push the value onto the internal buffer. It will be returned by the - * next call to {@link #next()}. + /* + * Return a chunk from the source iterator. + * + * Note: The chunk will be in the order used by the source iterator. + * If the source iterator does not report that order then + * [chunkKeyOrder] will be null. + */ + + chunkKeyOrder = keyOrder; + + ISPO[] s = new ISPO[chunkSize]; + + int n = 0; + + while (src.hasNext() && n < chunkSize) { + + s[n++] = src.next(); + + } + + ISPO[] stmts = new ISPO[n]; + + // copy so that stmts[] is dense. + System.arraycopy(s, 0, stmts, 0, n); + + return stmts; + + } + + /* + * Create a "chunk" of entailments. + * + * Note: This chunk will be in natural POS order since that is the index + * that we scan to decide whether or not there was an explicit ( x + * rdf:type rdfs:Resource ) while we consume the [subjects] in termId + * order. + */ + + IV[] s = new IV[chunkSize]; + + int n = 0; + + while (resourceIds.hasNext() && n < chunkSize) { + + s[n++] = resourceIds.next(); + + } + + SPO[] stmts = new SPO[n]; + + for (int i = 0; i < n; i++) { + + stmts[i] = new SPO(s[i], rdfType, rdfsResource, + StatementEnum.Inferred); + + } + + if (keyOrder != null && keyOrder != SPOKeyOrder.POS) { + + /* + * Sort into the same order as the source iterator. 
+ * + * Note: We have to sort explicitly since we are scanning the POS + * index + */ + + Arrays.sort(stmts, 0, stmts.length, keyOrder.getComparator()); + + } + + /* + * The chunk will be in POS order since that is how we are scanning the + * indices. + */ + + chunkKeyOrder = SPOKeyOrder.POS; + + return stmts; + + } + + public ISPO[] nextChunk(IKeyOrder<ISPO> keyOrder) { + + if (keyOrder == null) + throw new IllegalArgumentException(); + + final ISPO[] stmts = nextChunk(); + + if (chunkKeyOrder != keyOrder) { + + // sort into the required order. + + Arrays.sort(stmts, 0, stmts.length, keyOrder.getComparator()); + + } + + return stmts; + + } + + /** + * Note: You can not "remove" the backchained entailments. If the last + * statement visited by {@link #next()} is "explicit" then the request is + * delegated to the source iterator. + */ + public void remove() { + + if (!open) + throw new IllegalStateException(); + + if (current == null) + throw new IllegalStateException(); + + if (current.isExplicit()) { + + /* + * Delegate the request to the source iterator. + */ + + src.remove(); + + } + + current = null; + + } + + /** + * Reads on two iterators visiting elements in some natural order and visits + * their order preserving merge (no duplicates). + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * @version $Id: BackchainTypeResourceIterator.java 3687 2010-09-29 + * 22:50:32Z mrpersonick $ + * @param <T> + */ + private static class MergedOrderedIterator<T extends Comparable<T>> + implements IChunkedIterator<T> { + + private final IChunkedIterator<T> src1; + private final IChunkedIterator<T> src2; + + public MergedOrderedIterator(IChunkedIterator<T> src1, + IChunkedIterator<T> src2) { + + this.src1 = src1; + + this.src2 = src2; + + } + + public void close() { + + src1.close(); + + src2.close(); + + } + + /** + * Note: Not implemented since not used above and this class is private. + */ + public T[] nextChunk() { + throw new UnsupportedOperationException(); + } + + public boolean hasNext() { + + return tmp1 != null || tmp2 != null || src1.hasNext() + || src2.hasNext(); + + } + + private T tmp1; + private T tmp2; + + public T next() { + + if (!hasNext()) + throw new NoSuchElementException(); + + if (tmp1 == null && src1.hasNext()) { + + tmp1 = src1.next(); + + } + + if (tmp2 == null && src2.hasNext()) { + + tmp2 = src2.next(); + + } + + if (tmp1 == null) { + + // src1 is exhausted so deliver from src2. + final T tmp = tmp2; + + tmp2 = null; + + return tmp; + + } + + if (tmp2 == null) { + + // src2 is exhausted so deliver from src1. + final T tmp = tmp1; + + tmp1 = null; + + return tmp; + + } + + final int cmp = tmp1.compareTo(tmp2); + + if (cmp == 0) { + + final T tmp = tmp1; + + tmp1 = tmp2 = null; + + return tmp; + + } else if (cmp < 0) { + + final T tmp = tmp1; + + tmp1 = null; + + return tmp; + + } else { + + final T tmp = tmp2; + + tmp2 = null; + + return tmp; + + } + + } + + public void remove() { + + throw new UnsupportedOperationException(); + + } + + } + + /** + * Filterator style construct that allows push back of a single visited + * element. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * @version $Id: BackchainTypeResourceIterator.java 3687 2010-09-29 + * 22:50:32Z mrpersonick $ + * @param <E> + */ + public static class PushbackFilter<E> extends FilterBase { + + /** * - * @param value - * The value. - * - * @throws IllegalStateException - * if there is already a value pushed back. 
*/ - public void pushback() { + private static final long serialVersionUID = -8010263934867149205L; - if (buffer != null) - throw new IllegalStateException(); - - // pushback the last visited element. - buffer = current; - - } - - public void remove() { + @SuppressWarnings("unchecked") + public PushbackIterator<E> filterOnce(Iterator src, Object context) { - throw new UnsupportedOperationException(); + return new PushbackIterator<E>((Iterator<E>) src); - } + } - public void close() { + } - if(src instanceof ICloseableIterator) { + /** + * Implementation class for {@link PushbackFilter}. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * @version $Id: BackchainTypeResourceIterator.java 3687 2010-09-29 + * 22:50:32Z mrpersonick $ + * @param <E> + */ + public static class PushbackIterator<E> implements Iterator<E>, + ICloseableIterator<E> { - ((ICloseableIterator<E>)src).close(); - - } - - } + private final Iterator<E> src; - } - - private static class BackchainSTypeResourceIterator - implements IChunkedOrderedIterator<ISPO> { + /** + * The most recent element visited by the iterator. + */ + private E current; - private final IChunkedOrderedIterator<ISPO> _src; - private final IAccessPath<ISPO> accessPath; - private final AbstractTripleStore db; - private final IV rdfType; - private final IV rdfsResource; - private final IV s; - private IChunkedOrderedIterator<ISPO> appender; - private boolean canRemove; - - public BackchainSTypeResourceIterator( - final IChunkedOrderedIterator<ISPO> _src, - final IAccessPath<ISPO> accessPath, final AbstractTripleStore db, - final IV rdfType, final IV rdfsResource) { - this._src = _src; - this.accessPath = accessPath; - this.db = db; - this.rdfType = rdfType; - this.rdfsResource = rdfsResource; - this.s = (IV) accessPath... [truncated message content] |
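The BackchainTypeResourceIterator change above replaces the single SPO distinct-term scan with an order-preserving merge of the SPO and OSP distinct-term scans (literals filtered from the latter), implemented by the private MergedOrderedIterator. The technique is a classic two-way merge with one element of lookahead per source, emitting equal heads exactly once. A minimal self-contained sketch of that pattern, assuming only java.util -- the class and method names here are illustrative, not the bigdata API:

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    /**
     * Order-preserving merge of two iterators that each visit elements in
     * ascending order. An element appearing at the head of both sources at
     * the same time is emitted once.
     */
    public class MergeSortedIterators<T extends Comparable<T>> implements Iterator<T> {

        private final Iterator<T> src1, src2;
        private T tmp1, tmp2; // one-element lookahead per source

        public MergeSortedIterators(final Iterator<T> src1, final Iterator<T> src2) {
            this.src1 = src1;
            this.src2 = src2;
        }

        public boolean hasNext() {
            return tmp1 != null || tmp2 != null || src1.hasNext() || src2.hasNext();
        }

        public T next() {
            if (!hasNext())
                throw new NoSuchElementException();
            // Refill whichever lookahead slots are empty.
            if (tmp1 == null && src1.hasNext()) tmp1 = src1.next();
            if (tmp2 == null && src2.hasNext()) tmp2 = src2.next();
            // If one source is exhausted, drain the other.
            if (tmp1 == null) { final T t = tmp2; tmp2 = null; return t; }
            if (tmp2 == null) { final T t = tmp1; tmp1 = null; return t; }
            final int cmp = tmp1.compareTo(tmp2);
            if (cmp == 0) { final T t = tmp1; tmp1 = tmp2 = null; return t; } // duplicate: emit once
            if (cmp < 0)  { final T t = tmp1; tmp1 = null; return t; }
            final T t = tmp2; tmp2 = null; return t;
        }

        public void remove() { throw new UnsupportedOperationException(); }
    }

Feeding it two ascending, internally distinct sources yields an ascending union in which cross-source duplicates appear once, which is exactly the property the backchainer relies on to visit each resource identifier a single time.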
From: <tho...@us...> - 2011-01-05 13:49:23
Revision: 4053 http://bigdata.svn.sourceforge.net/bigdata/?rev=4053&view=rev Author: thompsonbry Date: 2011-01-05 13:49:15 +0000 (Wed, 05 Jan 2011) Log Message: ----------- Mainly an (inadvertent) whitespace change. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2011-01-05 13:45:21 UTC (rev 4052) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2011-01-05 13:49:15 UTC (rev 4053) @@ -20,7 +20,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ + */ /* * Created on Aug 18, 2010 */ @@ -104,65 +104,66 @@ * suite. */ public class PipelineJoin<E> extends PipelineOp implements - IShardwisePipelineOp<E> { + IShardwisePipelineOp<E> { - static private final transient Logger log = Logger.getLogger(PipelineJoin.class); + static private final transient Logger log = Logger + .getLogger(PipelineJoin.class); - /** + /** * */ - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; - public interface Annotations extends PipelineOp.Annotations { + public interface Annotations extends PipelineOp.Annotations { /** * The {@link IPredicate} which is used to generate the * {@link IAccessPath}s during the join. */ String PREDICATE = PipelineJoin.class.getName() + ".predicate"; - - /** - * An optional {@link IVariable}[] identifying the variables to be - * retained in the {@link IBindingSet}s written out by the operator. - * All variables are retained unless this annotation is specified. - */ - String SELECT = PipelineJoin.class.getName() + ".select"; - - /** - * An optional {@link IConstraint}[] which places restrictions on the - * legal patterns in the variable bindings. - */ - String CONSTRAINTS = PipelineJoin.class.getName() + ".constraints"; - /** - * Marks the join as "optional" in the SPARQL sense. Binding sets which - * fail the join will be routed to the alternative sink as specified by - * either {@link PipelineOp.Annotations#ALT_SINK_REF} or - * {@link PipelineOp.Annotations#ALT_SINK_GROUP}. - * - * @see #DEFAULT_OPTIONAL - */ - String OPTIONAL = PipelineJoin.class.getName() + ".optional"; + /** + * An optional {@link IVariable}[] identifying the variables to be + * retained in the {@link IBindingSet}s written out by the operator. All + * variables are retained unless this annotation is specified. + */ + String SELECT = PipelineJoin.class.getName() + ".select"; - boolean DEFAULT_OPTIONAL = false; + /** + * An optional {@link IConstraint}[] which places restrictions on the + * legal patterns in the variable bindings. + */ + String CONSTRAINTS = PipelineJoin.class.getName() + ".constraints"; - /** - * The maximum parallelism with which the pipeline will consume the - * source {@link IBindingSet}[] chunk. - * <p> - * Note: When ZERO (0), everything will run in the caller's - * {@link Thread}, but there will still be one thread per pipeline join - * task which is executing concurrently against different source chunks. - * When GT ZERO (0), tasks will run on an {@link ExecutorService} with - * the specified maximum parallelism. 
- * - * @see #DEFAULT_MAX_PARALLEL - */ - String MAX_PARALLEL = PipelineJoin.class.getName() + ".maxParallel"; + /** + * Marks the join as "optional" in the SPARQL sense. Binding sets which + * fail the join will be routed to the alternative sink as specified by + * either {@link PipelineOp.Annotations#ALT_SINK_REF} or + * {@link PipelineOp.Annotations#ALT_SINK_GROUP}. + * + * @see #DEFAULT_OPTIONAL + */ + String OPTIONAL = PipelineJoin.class.getName() + ".optional"; - int DEFAULT_MAX_PARALLEL = 0; + boolean DEFAULT_OPTIONAL = false; /** + * The maximum parallelism with which the pipeline will consume the + * source {@link IBindingSet}[] chunk. + * <p> + * Note: When ZERO (0), everything will run in the caller's + * {@link Thread}, but there will still be one thread per pipeline join + * task which is executing concurrently against different source chunks. + * When GT ZERO (0), tasks will run on an {@link ExecutorService} with + * the specified maximum parallelism. + * + * @see #DEFAULT_MAX_PARALLEL + */ + String MAX_PARALLEL = PipelineJoin.class.getName() + ".maxParallel"; + + int DEFAULT_MAX_PARALLEL = 0; + + /** * When <code>true</code>, binding sets observed in the same chunk which * have the binding pattern on the variables for the access path will be * coalesced into a single access path (default @@ -172,7 +173,7 @@ * does NOT reduce the #of solutions generated. * <p> * This option can cause some error in the join hit ratio when it is - * estimated from a cutoff join. + * estimated from a cutoff join. * * @see PipelineJoinStats#getJoinHitRatio() * @@ -194,16 +195,16 @@ String LIMIT = PipelineJoin.class.getName() + ".limit"; long DEFAULT_LIMIT = Long.MAX_VALUE; - - } - /** - * Extended statistics for the join operator. - */ - public static class PipelineJoinStats extends BOpStats { + } - private static final long serialVersionUID = 1L; - + /** + * Extended statistics for the join operator. + */ + public static class PipelineJoinStats extends BOpStats { + + private static final long serialVersionUID = 1L; + /** * The #of duplicate access paths which were detected and filtered out. */ @@ -294,492 +295,505 @@ return 0; return ((double) out) / in; } - - /** - * The #of chunks read from an {@link IAccessPath}. - */ - public final CAT accessPathChunksIn = new CAT(); - - /** - * The #of elements read from an {@link IAccessPath}. - */ - public final CAT accessPathUnitsIn = new CAT(); -// /** -// * The maximum observed fan in for this join dimension (maximum #of -// * sources observed writing on any join task for this join dimension). -// * Since join tasks may be closed and new join tasks re-opened for the -// * same query, join dimension and index partition, and since each join -// * task for the same join dimension could, in principle, have a -// * different fan in based on the actual binding sets propagated this is -// * not necessarily the "actual" fan in for the join dimension. You would -// * have to track the #of distinct partitionId values to track that. -// */ -// public int fanIn; -// -// /** -// * The maximum observed fan out for this join dimension (maximum #of -// * sinks on which any join task is writing for this join dimension). 
-// * Since join tasks may be closed and new join tasks re-opened for the -// * same query, join dimension and index partition, and since each join -// * task for the same join dimension could, in principle, have a -// * different fan out based on the actual binding sets propagated this is -// * not necessarily the "actual" fan out for the join dimension. -// */ -// public int fanOut; + /** + * The #of chunks read from an {@link IAccessPath}. + */ + public final CAT accessPathChunksIn = new CAT(); - public void add(final BOpStats o) { + /** + * The #of elements read from an {@link IAccessPath}. + */ + public final CAT accessPathUnitsIn = new CAT(); - super.add(o); - - if (o instanceof PipelineJoinStats) { + // /** + // * The maximum observed fan in for this join dimension (maximum #of + // * sources observed writing on any join task for this join dimension). + // * Since join tasks may be closed and new join tasks re-opened for the + // * same query, join dimension and index partition, and since each join + // * task for the same join dimension could, in principle, have a + // * different fan in based on the actual binding sets propagated this + // is + // * not necessarily the "actual" fan in for the join dimension. You + // would + // * have to track the #of distinct partitionId values to track that. + // */ + // public int fanIn; + // + // /** + // * The maximum observed fan out for this join dimension (maximum #of + // * sinks on which any join task is writing for this join dimension). + // * Since join tasks may be closed and new join tasks re-opened for the + // * same query, join dimension and index partition, and since each join + // * task for the same join dimension could, in principle, have a + // * different fan out based on the actual binding sets propagated this + // is + // * not necessarily the "actual" fan out for the join dimension. + // */ + // public int fanOut; - final PipelineJoinStats t = (PipelineJoinStats) o; + public void add(final BOpStats o) { - accessPathDups.add(t.accessPathDups.get()); + super.add(o); - accessPathCount.add(t.accessPathCount.get()); + if (o instanceof PipelineJoinStats) { - accessPathRangeCount.add(t.accessPathRangeCount.get()); + final PipelineJoinStats t = (PipelineJoinStats) o; - accessPathChunksIn.add(t.accessPathChunksIn.get()); + accessPathDups.add(t.accessPathDups.get()); - accessPathUnitsIn.add(t.accessPathUnitsIn.get()); + accessPathCount.add(t.accessPathCount.get()); - inputSolutions.add(t.inputSolutions.get()); + accessPathRangeCount.add(t.accessPathRangeCount.get()); - outputSolutions.add(t.outputSolutions.get()); + accessPathChunksIn.add(t.accessPathChunksIn.get()); -// if (t.fanIn > this.fanIn) { -// // maximum reported fanIn for this join dimension. -// this.fanIn = t.fanIn; -// } -// if (t.fanOut > this.fanOut) { -// // maximum reported fanOut for this join dimension. 
-// this.fanOut += t.fanOut; -// } + accessPathUnitsIn.add(t.accessPathUnitsIn.get()); - } - - } - - @Override - protected void toString(final StringBuilder sb) { - sb.append(",accessPathDups=" + accessPathDups.get()); - sb.append(",accessPathCount=" + accessPathCount.get()); - sb.append(",accessPathRangeCount=" + accessPathRangeCount.get()); - sb.append(",accessPathChunksIn=" + accessPathChunksIn.get()); - sb.append(",accessPathUnitsIn=" + accessPathUnitsIn.get()); - sb.append(",inputSolutions=" + inputSolutions.get()); - sb.append(",outputSolutions=" + outputSolutions.get()); - sb.append(",joinHitRatio=" + getJoinHitRatio()); - } - - } + inputSolutions.add(t.inputSolutions.get()); - /** - * Deep copy constructor. - * - * @param op - */ - public PipelineJoin(final PipelineJoin<E> op) { - super(op); - } - - /** - * Shallow copy vararg constructor. - * - * @param args - * @param annotations - */ - public PipelineJoin(final BOp[] args, NV... annotations) { + outputSolutions.add(t.outputSolutions.get()); - this(args, NV.asMap(annotations)); - - } + // if (t.fanIn > this.fanIn) { + // // maximum reported fanIn for this join dimension. + // this.fanIn = t.fanIn; + // } + // if (t.fanOut > this.fanOut) { + // // maximum reported fanOut for this join dimension. + // this.fanOut += t.fanOut; + // } - /** - * Shallow copy constructor. - * - * @param args - * @param annotations - */ - public PipelineJoin(final BOp[] args, final Map<String, Object> annotations) { + } - super(args, annotations); + } -// if (arity() != 1) -// throw new IllegalArgumentException(); + @Override + protected void toString(final StringBuilder sb) { + sb.append(",accessPathDups=" + accessPathDups.get()); + sb.append(",accessPathCount=" + accessPathCount.get()); + sb.append(",accessPathRangeCount=" + accessPathRangeCount.get()); + sb.append(",accessPathChunksIn=" + accessPathChunksIn.get()); + sb.append(",accessPathUnitsIn=" + accessPathUnitsIn.get()); + sb.append(",inputSolutions=" + inputSolutions.get()); + sb.append(",outputSolutions=" + outputSolutions.get()); + sb.append(",joinHitRatio=" + getJoinHitRatio()); + } -// if (left() == null) -// throw new IllegalArgumentException(); + } - } - -// /** -// * The sole operand, which is the previous join in the pipeline join path. -// */ -// public PipelineOp left() { -// -// return (PipelineOp) get(0); -// -// } + /** + * Deep copy constructor. + * + * @param op + */ + public PipelineJoin(final PipelineJoin<E> op) { + super(op); + } - /** - * {@inheritDoc} - * - * @see Annotations#PREDICATE - */ - @SuppressWarnings("unchecked") + /** + * Shallow copy vararg constructor. + * + * @param args + * @param annotations + */ + public PipelineJoin(final BOp[] args, NV... annotations) { + + this(args, NV.asMap(annotations)); + + } + + /** + * Shallow copy constructor. + * + * @param args + * @param annotations + */ + public PipelineJoin(final BOp[] args, final Map<String, Object> annotations) { + + super(args, annotations); + + // if (arity() != 1) + // throw new IllegalArgumentException(); + + // if (left() == null) + // throw new IllegalArgumentException(); + + } + + // /** + // * The sole operand, which is the previous join in the pipeline join path. 
+ // */ + // public PipelineOp left() { + // + // return (PipelineOp) get(0); + // + // } + + /** + * {@inheritDoc} + * + * @see Annotations#PREDICATE + */ + @SuppressWarnings("unchecked") public IPredicate<E> getPredicate() { - + return (IPredicate<E>) getRequiredProperty(Annotations.PREDICATE); - - } - /** - * @see Annotations#CONSTRAINTS - */ - public IConstraint[] constraints() { + } - return getProperty(Annotations.CONSTRAINTS, null/* defaultValue */); + /** + * @see Annotations#CONSTRAINTS + */ + public IConstraint[] constraints() { - } + return getProperty(Annotations.CONSTRAINTS, null/* defaultValue */); - /** - * @see Annotations#OPTIONAL - */ - public boolean isOptional() { + } - return getProperty(Annotations.OPTIONAL, Annotations.DEFAULT_OPTIONAL); + /** + * @see Annotations#OPTIONAL + */ + public boolean isOptional() { - } + return getProperty(Annotations.OPTIONAL, Annotations.DEFAULT_OPTIONAL); - /** - * @see Annotations#MAX_PARALLEL - */ - public int getMaxParallel() { + } - return getProperty(Annotations.MAX_PARALLEL, Annotations.DEFAULT_MAX_PARALLEL); + /** + * @see Annotations#MAX_PARALLEL + */ + public int getMaxParallel() { - } + // return 5; + return getProperty(Annotations.MAX_PARALLEL, + Annotations.DEFAULT_MAX_PARALLEL); - /** - * @see Annotations#SELECT - */ - public IVariable<?>[] variablesToKeep() { + } - return getProperty(Annotations.SELECT, null/* defaultValue */); + /** + * @see Annotations#SELECT + */ + public IVariable<?>[] variablesToKeep() { - } + return getProperty(Annotations.SELECT, null/* defaultValue */); - @Override - public PipelineJoinStats newStats() { + } - return new PipelineJoinStats(); - - } - - public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { + @Override + public PipelineJoinStats newStats() { - return new FutureTask<Void>(new JoinTask<E>(this, context)); - - } + return new PipelineJoinStats(); - /** - * Pipeline join impl. - */ - private static class JoinTask<E> extends Haltable<Void> implements Callable<Void> { + } - /** - * The join that is being executed. - */ - final private PipelineJoin<?> joinOp; + public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { - /** - * The constraint (if any) specified for the join operator. - */ - final private IConstraint[] constraints; + return new FutureTask<Void>(new JoinTask<E>(this, context)); - /** - * The maximum parallelism with which the {@link JoinTask} will - * consume the source {@link IBindingSet}s. - * - * @see Annotations#MAX_PARALLEL - */ - final private int maxParallel; + } - /** - * The service used for executing subtasks (optional). - * - * @see #maxParallel - */ - final private Executor service; + /** + * Pipeline join impl. + */ + private static class JoinTask<E> extends Haltable<Void> implements + Callable<Void> { - /** - * True iff the {@link #predicate} operand is an optional pattern (aka if - * this is a SPARQL style left join). - */ - final private boolean optional; + /** + * The join that is being executed. + */ + final private PipelineJoin<?> joinOp; - /** - * The variables to be retained by the join operator. Variables not - * appearing in this list will be stripped before writing out the - * binding set onto the output sink(s). - */ - final private IVariable<?>[] variablesToKeep; + /** + * The constraint (if any) specified for the join operator. + */ + final private IConstraint[] constraints; - /** - * The source for the elements to be joined. 
- */ - final private IPredicate<E> predicate; + /** + * The maximum parallelism with which the {@link JoinTask} will consume + * the source {@link IBindingSet}s. + * + * @see Annotations#MAX_PARALLEL + */ + final private int maxParallel; - /** - * The relation associated with the {@link #predicate} operand. - */ - final private IRelation<E> relation; - - /** - * The partition identifier -or- <code>-1</code> if we are not reading - * on an index partition. - */ - final private int partitionId; - - /** - * The evaluation context. - */ - final private BOpContext<IBindingSet> context; + /** + * The service used for executing subtasks (optional). + * + * @see #maxParallel + */ + final private Executor service; - /** - * The statistics for this {@link JoinTask}. - */ - final private PipelineJoinStats stats; + /** + * True iff the {@link #predicate} operand is an optional pattern (aka + * if this is a SPARQL style left join). + */ + final private boolean optional; /** + * The variables to be retained by the join operator. Variables not + * appearing in this list will be stripped before writing out the + * binding set onto the output sink(s). + */ + final private IVariable<?>[] variablesToKeep; + + /** + * The source for the elements to be joined. + */ + final private IPredicate<E> predicate; + + /** + * The relation associated with the {@link #predicate} operand. + */ + final private IRelation<E> relation; + + /** + * The partition identifier -or- <code>-1</code> if we are not reading + * on an index partition. + */ + final private int partitionId; + + /** + * The evaluation context. + */ + final private BOpContext<IBindingSet> context; + +// /** +// * When <code>true</code>, the {@link #stats} will be tracked. This is +// * <code>false</code> unless logging is requested for {@link QueryLog} +// * or stats are explicitly request (e.g., to support cutoff joins). +// */ +// final private boolean trackStats; + + /** + * The statistics for this {@link JoinTask}. + */ + final private PipelineJoinStats stats; + + /** * An optional limit on the #of solutions to be produced. The limit is * ignored if it is {@link Long#MAX_VALUE}. */ - final private long limit; - - /** - * When <code>true</code> an attempt will be made to coalesce as-bound - * predicates which result in the same access path. - * - * @see Annotations#COALESCE_DUPLICATE_ACCESS_PATHS - */ - final boolean coalesceAccessPaths; - - /** - * Used to enforce the {@link Annotations#LIMIT} iff one is specified. - */ - final private AtomicLong exactOutputCount = new AtomicLong(); - - /** - * The source from which we read the binding set chunks. - * <p> - * Note: In keeping with the top-down evaluation of the operator tree - * the source should not be set until we begin to execute the - * {@link #left} operand and that should not happen until we are in - * {@link #call()} in order to ensure that the producer will be - * terminated if there is a problem setting up this join. Given that, it - * might need to be an atomic reference or volatile or the like. - */ - final private IAsynchronousIterator<IBindingSet[]> source; + final private long limit; - /** - * Where the join results are written. - * <p> - * Chunks of bindingSets are written pre-Thread unsynchronized buffers - * by {@link ChunkTask}. Those unsynchronized buffers overflow onto the - * per-JoinTask {@link #sink}, which is a thread-safe - * {@link IBlockingBuffer}. The downstream pipeline operator drains that - * {@link IBlockingBuffer} using its iterator(). 
When the {@link #sink} - * is closed and everything in it has been drained, then the downstream - * operator will conclude that no more bindingSets are available and it - * will terminate. - */ - final private IBlockingBuffer<IBindingSet[]> sink; + /** + * When <code>true</code> an attempt will be made to coalesce as-bound + * predicates which result in the same access path. + * + * @see Annotations#COALESCE_DUPLICATE_ACCESS_PATHS + */ + final boolean coalesceAccessPaths; - /** - * The alternative sink to use when the join is {@link #optional} AND - * {@link BOpContext#getSink2()} returns a distinct buffer for the - * alternative sink. The binding sets from the source are copied onto the - * alternative sink for an optional join if the join fails. Normally the - * {@link BOpContext#getSink()} can be used for both the joins which - * succeed and those which fail. The alternative sink is only necessary - * when the failed join needs to jump out of a join group rather than - * routing directly to the ancestor in the operator tree. - */ - final private IBlockingBuffer<IBindingSet[]> sink2; - - /** - * The thread-local buffer factory for the default sink. - */ - final private TLBFactory threadLocalBufferFactory; - - /** - * The thread-local buffer factory for the optional sink (iff the - * optional sink is defined). - */ - final private TLBFactory threadLocalBufferFactory2; + /** + * Used to enforce the {@link Annotations#LIMIT} iff one is specified. + */ + final private AtomicLong exactOutputCount = new AtomicLong(); - /** - * Instances of this class MUST be created in the appropriate execution - * context of the target {@link DataService} so that the federation and - * the joinNexus references are both correct and so that it has access - * to the local index object for the specified index partition. - * - * @param joinOp - * @param context - */ - public JoinTask(// - final PipelineJoin<E> joinOp,// - final BOpContext<IBindingSet> context - ) { + /** + * The source from which we read the binding set chunks. + * <p> + * Note: In keeping with the top-down evaluation of the operator tree + * the source should not be set until we begin to execute the + * {@link #left} operand and that should not happen until we are in + * {@link #call()} in order to ensure that the producer will be + * terminated if there is a problem setting up this join. Given that, it + * might need to be an atomic reference or volatile or the like. + */ + final private IAsynchronousIterator<IBindingSet[]> source; - if (joinOp == null) - throw new IllegalArgumentException(); - if (context == null) - throw new IllegalArgumentException(); + /** + * Where the join results are written. + * <p> + * Chunks of bindingSets are written pre-Thread unsynchronized buffers + * by {@link ChunkTask}. Those unsynchronized buffers overflow onto the + * per-JoinTask {@link #sink}, which is a thread-safe + * {@link IBlockingBuffer}. The downstream pipeline operator drains that + * {@link IBlockingBuffer} using its iterator(). When the {@link #sink} + * is closed and everything in it has been drained, then the downstream + * operator will conclude that no more bindingSets are available and it + * will terminate. 
+ */ + final private IBlockingBuffer<IBindingSet[]> sink; - this.joinOp = joinOp; - this.predicate = joinOp.getPredicate(); - this.constraints = joinOp.constraints(); - this.maxParallel = joinOp.getMaxParallel(); - if (maxParallel < 0) - throw new IllegalArgumentException(Annotations.MAX_PARALLEL - + "=" + maxParallel); - if (maxParallel > 0) { - // shared service. - service = new LatchedExecutor(context.getIndexManager() - .getExecutorService(), maxParallel); - } else { - // run in the caller's thread. - service = null; - } - this.optional = joinOp.isOptional(); - this.variablesToKeep = joinOp.variablesToKeep(); - this.context = context; - this.relation = context.getRelation(predicate); - this.source = context.getSource(); - this.sink = context.getSink(); - this.sink2 = context.getSink2(); - this.partitionId = context.getPartitionId(); - this.stats = (PipelineJoinStats) context.getStats(); - this.limit = joinOp.getProperty(Annotations.LIMIT,Annotations.DEFAULT_LIMIT); + /** + * The alternative sink to use when the join is {@link #optional} AND + * {@link BOpContext#getSink2()} returns a distinct buffer for the + * alternative sink. The binding sets from the source are copied onto + * the alternative sink for an optional join if the join fails. Normally + * the {@link BOpContext#getSink()} can be used for both the joins which + * succeed and those which fail. The alternative sink is only necessary + * when the failed join needs to jump out of a join group rather than + * routing directly to the ancestor in the operator tree. + */ + final private IBlockingBuffer<IBindingSet[]> sink2; + + /** + * The thread-local buffer factory for the default sink. + */ + final private TLBFactory threadLocalBufferFactory; + + /** + * The thread-local buffer factory for the optional sink (iff the + * optional sink is defined). + */ + final private TLBFactory threadLocalBufferFactory2; + + /** + * Instances of this class MUST be created in the appropriate execution + * context of the target {@link DataService} so that the federation and + * the joinNexus references are both correct and so that it has access + * to the local index object for the specified index partition. + * + * @param joinOp + * @param context + */ + public JoinTask(// + final PipelineJoin<E> joinOp,// + final BOpContext<IBindingSet> context) { + + if (joinOp == null) + throw new IllegalArgumentException(); + if (context == null) + throw new IllegalArgumentException(); + + this.joinOp = joinOp; + this.predicate = joinOp.getPredicate(); + this.constraints = joinOp.constraints(); + this.maxParallel = joinOp.getMaxParallel(); + if (maxParallel < 0) + throw new IllegalArgumentException(Annotations.MAX_PARALLEL + + "=" + maxParallel); + if (maxParallel > 0) { + // shared service. + service = new LatchedExecutor(context.getIndexManager() + .getExecutorService(), maxParallel); + } else { + // run in the caller's thread. 
+ service = null; + } + this.optional = joinOp.isOptional(); + this.variablesToKeep = joinOp.variablesToKeep(); + this.context = context; + this.relation = context.getRelation(predicate); + this.source = context.getSource(); + this.sink = context.getSink(); + this.sink2 = context.getSink2(); + this.partitionId = context.getPartitionId(); + this.stats = (PipelineJoinStats) context.getStats(); + this.limit = joinOp.getProperty(Annotations.LIMIT, + Annotations.DEFAULT_LIMIT); this.coalesceAccessPaths = joinOp.getProperty( Annotations.COALESCE_DUPLICATE_ACCESS_PATHS, Annotations.DEFAULT_COALESCE_DUPLICATE_ACCESS_PATHS); - - this.threadLocalBufferFactory = new TLBFactory(sink); - - this.threadLocalBufferFactory2 = sink2 == null ? null - : new TLBFactory(sink2); - if (log.isDebugEnabled()) - log.debug("joinOp=" + joinOp); + this.threadLocalBufferFactory = new TLBFactory(sink); - } + this.threadLocalBufferFactory2 = sink2 == null ? null + : new TLBFactory(sink2); - public String toString() { + if (log.isDebugEnabled()) + log.debug("joinOp=" + joinOp); - return getClass().getName() + "{ joinOp=" + joinOp + "}"; + } - } + public String toString() { - /** - * Runs the {@link JoinTask}. - * - * @return <code>null</code>. - */ - public Void call() throws Exception { + return getClass().getName() + "{ joinOp=" + joinOp + "}"; -// final long begin = System.currentTimeMillis(); - - if (log.isDebugEnabled()) - log.debug("joinOp=" + joinOp); + } - try { + /** + * Runs the {@link JoinTask}. + * + * @return <code>null</code>. + */ + public Void call() throws Exception { - /* - * Consume bindingSet chunks from the source JoinTask(s). - */ - consumeSource(); + // final long begin = System.currentTimeMillis(); - /* - * Flush and close the thread-local output buffers. - */ - threadLocalBufferFactory.flush(); - if (threadLocalBufferFactory2 != null) - threadLocalBufferFactory2.flush(); + if (log.isDebugEnabled()) + log.debug("joinOp=" + joinOp); - // flush the sync buffer - flushAndCloseBuffersAndAwaitSinks(); + try { - if (log.isDebugEnabled()) - log.debug("JoinTask done: joinOp=" + joinOp); + /* + * Consume bindingSet chunks from the source JoinTask(s). + */ + consumeSource(); - halted(); + /* + * Flush and close the thread-local output buffers. + */ + threadLocalBufferFactory.flush(); + if (threadLocalBufferFactory2 != null) + threadLocalBufferFactory2.flush(); - return null; + // flush the sync buffer + flushAndCloseBuffersAndAwaitSinks(); - } catch (Throwable t) { + if (log.isDebugEnabled()) + log.debug("JoinTask done: joinOp=" + joinOp); - /* - * This is used for processing errors and also if this task is - * interrupted (because the sink has been closed). - */ + halted(); - halt(t); + return null; - // reset the unsync buffers. - try { - // resetUnsyncBuffers(); - threadLocalBufferFactory.reset(); - if (threadLocalBufferFactory2 != null) - threadLocalBufferFactory2.reset(); - } catch (Throwable t2) { - log.error(t2.getLocalizedMessage(), t2); - } + } catch (Throwable t) { - // reset the sync buffer and cancel the sink JoinTasks. - try { - cancelSinks(); - } catch (Throwable t2) { - log.error(t2.getLocalizedMessage(), t2); - } + /* + * This is used for processing errors and also if this task is + * interrupted (because the sink has been closed). + */ - /* - * Close source iterators, which will cause any source JoinTasks - * that are still executing to throw a CancellationException - * when the Future associated with the source iterator is - * cancelled. 
- */ - try { - closeSources(); - } catch (Throwable t2) { - log.error(t2.getLocalizedMessage(), t2); - } + halt(t); - throw new RuntimeException(t); + // reset the unsync buffers. + try { + // resetUnsyncBuffers(); + threadLocalBufferFactory.reset(); + if (threadLocalBufferFactory2 != null) + threadLocalBufferFactory2.reset(); + } catch (Throwable t2) { + log.error(t2.getLocalizedMessage(), t2); + } -// } finally { -// -// stats.elapsed.add(System.currentTimeMillis() - begin); - - } + // reset the sync buffer and cancel the sink JoinTasks. + try { + cancelSinks(); + } catch (Throwable t2) { + log.error(t2.getLocalizedMessage(), t2); + } - } + /* + * Close source iterators, which will cause any source JoinTasks + * that are still executing to throw a CancellationException + * when the Future associated with the source iterator is + * cancelled. + */ + try { + closeSources(); + } catch (Throwable t2) { + log.error(t2.getLocalizedMessage(), t2); + } - /** - * Consume {@link IBindingSet} chunks from the {@link #source}. - * - * @throws Exception - */ - protected void consumeSource() throws Exception { + throw new RuntimeException(t); - IBindingSet[] chunk; + // } finally { + // + // stats.elapsed.add(System.currentTimeMillis() - begin); + } + + } + + /** + * Consume {@link IBindingSet} chunks from the {@link #source}. + * + * @throws Exception + */ + protected void consumeSource() throws Exception { + + IBindingSet[] chunk; + while (!isDone() && (chunk = nextChunk()) != null) { if (chunk.length == 0) { @@ -789,240 +803,240 @@ */ continue; } - - /* - * Consume the chunk until done using either the caller's thread - * or the executor service as appropriate to run subtasks. - */ - if (chunk.length <= 1) { - - /* - * Run on the caller's thread anyway since there is just one - * binding set to be consumed. - */ - - new BindingSetConsumerTask(null/* service */, chunk).call(); - - } else { - - /* - * Run subtasks on either the caller's thread or the shared - * executed service depending on the configured value of - * [maxParallel]. - */ - - new BindingSetConsumerTask(service, chunk).call(); - - } - - } - } + /* + * Consume the chunk until done using either the caller's thread + * or the executor service as appropriate to run subtasks. + */ + if (chunk.length <= 1) { - /** - * Closes the {@link #source} specified to the ctor. - */ - protected void closeSources() { + /* + * Run on the caller's thread anyway since there is just one + * binding set to be consumed. + */ - if (log.isInfoEnabled()) - log.info(toString()); + new BindingSetConsumerTask(null/* service */, chunk) + .call(); - source.close(); + } else { - } + /* + * Run subtasks on either the caller's thread or the shared + * executed service depending on the configured value of + * [maxParallel]. + */ - /** - * Flush and close all output buffers and await sink {@link JoinTask} - * (s). - * <p> - * Note: You MUST close the {@link BlockingBuffer} from which each sink - * reads <em>before</em> invoking this method in order for those sinks - * to terminate. Otherwise the source {@link IAsynchronousIterator}(s) - * on which the sink is reading will remain open and the sink will never - * decide that it has exhausted its source(s). 
- * - * @throws InterruptedException - * @throws ExecutionException - */ - protected void flushAndCloseBuffersAndAwaitSinks() - throws InterruptedException, ExecutionException { + new BindingSetConsumerTask(service, chunk).call(); - if (log.isDebugEnabled()) - log.debug("joinOp=" + joinOp); + } - /* - * Close the thread-safe output buffer. For any JOIN except the - * last, this buffer will be the source for one or more sink - * JoinTasks for the next join dimension. Once this buffer is - * closed, the asynchronous iterator draining the buffer will - * eventually report that there is nothing left for it to process. - * - * Note: This is a BlockingBuffer. BlockingBuffer#flush() is a NOP. - */ + } - sink.flush(); - sink.close(); - - if(sink2!=null) { - sink2.flush(); - sink2.close(); - } - - } + } - /** - * Cancel sink {@link JoinTask}(s). - */ - protected void cancelSinks() { + /** + * Closes the {@link #source} specified to the ctor. + */ + protected void closeSources() { - if (log.isDebugEnabled()) - log.debug("joinOp=" + joinOp); + if (log.isInfoEnabled()) + log.info(toString()); - sink.reset(); + source.close(); - if (sink.getFuture() != null) { + } - sink.getFuture().cancel(true/* mayInterruptIfRunning */); + /** + * Flush and close all output buffers and await sink {@link JoinTask} + * (s). + * <p> + * Note: You MUST close the {@link BlockingBuffer} from which each sink + * reads <em>before</em> invoking this method in order for those sinks + * to terminate. Otherwise the source {@link IAsynchronousIterator}(s) + * on which the sink is reading will remain open and the sink will never + * decide that it has exhausted its source(s). + * + * @throws InterruptedException + * @throws ExecutionException + */ + protected void flushAndCloseBuffersAndAwaitSinks() + throws InterruptedException, ExecutionException { - } + if (log.isDebugEnabled()) + log.debug("joinOp=" + joinOp); - if (sink2 != null) { - - sink2.reset(); + /* + * Close the thread-safe output buffer. For any JOIN except the + * last, this buffer will be the source for one or more sink + * JoinTasks for the next join dimension. Once this buffer is + * closed, the asynchronous iterator draining the buffer will + * eventually report that there is nothing left for it to process. + * + * Note: This is a BlockingBuffer. BlockingBuffer#flush() is a NOP. + */ - if (sink2.getFuture() != null) { + sink.flush(); + sink.close(); - sink2.getFuture().cancel(true/* mayInterruptIfRunning */); + if (sink2 != null) { + sink2.flush(); + sink2.close(); + } - } - - } + } - } + /** + * Cancel sink {@link JoinTask}(s). + */ + protected void cancelSinks() { - /** - * Return a chunk of {@link IBindingSet}s from source. - * - * @return The next chunk -or- <code>null</code> iff the source is - * exhausted. - */ - protected IBindingSet[] nextChunk() throws InterruptedException { + if (log.isDebugEnabled()) + log.debug("joinOp=" + joinOp); - if (log.isDebugEnabled()) - log.debug("joinOp=" + joinOp); + sink.reset(); - while (!source.isExhausted()) { + if (sink.getFuture() != null) { - halted(); + sink.getFuture().cancel(true/* mayInterruptIfRunning */); - // note: uses timeout to avoid blocking w/o testing [halt]. - if (source.hasNext(10, TimeUnit.MILLISECONDS)) { + } - // read the chunk. 
- final IBindingSet[] chunk = source.next(); + if (sink2 != null) { - stats.chunksIn.increment(); - stats.unitsIn.add(chunk.length); + sink2.reset(); - if (log.isDebugEnabled()) - log.debug("Read chunk from source: chunkSize=" - + chunk.length + ", joinOp=" + joinOp); + if (sink2.getFuture() != null) { - return chunk; + sink2.getFuture().cancel(true/* mayInterruptIfRunning */); - } + } - } + } - /* - * Termination condition: the source is exhausted. - */ + } - if (log.isDebugEnabled()) - log.debug("Source exhausted: joinOp=" + joinOp); + /** + * Return a chunk of {@link IBindingSet}s from source. + * + * @return The next chunk -or- <code>null</code> iff the source is + * exhausted. + */ + protected IBindingSet[] nextChunk() throws InterruptedException { - return null; + if (log.isDebugEnabled()) + log.debug("joinOp=" + joinOp); - } + while (!source.isExhausted()) { - /** - * Class consumes a chunk of binding set executing a nested indexed join - * until canceled, interrupted, or all the binding sets are exhausted. - * For each {@link IBindingSet} in the chunk, an {@link AccessPathTask} - * is created which will consume that {@link IBindingSet}. The - * {@link AccessPathTask}s are sorted based on their - * <code>fromKey</code> so as to order the execution of those tasks in a - * manner that will maximize the efficiency of index reads. The ordered - * {@link AccessPathTask}s are then submitted to the caller's - * {@link Executor} or run in the caller's thread if the executor is - * <code>null</code>. - * - * @author <a href="mailto:tho...@us...">Bryan - * Thompson</a> - * @version $Id$ - */ - protected class BindingSetConsumerTask implements Callable<Void> { + halted(); - private final Executor executor; - private final IBindingSet[] chunk; + // note: uses timeout to avoid blocking w/o testing [halt]. + if (source.hasNext(10, TimeUnit.MILLISECONDS)) { - /** - * - * @param executor - * The service that will execute the generated - * {@link AccessPathTask}s -or- <code>null</code> IFF you - * want the {@link AccessPathTask}s to be executed in the - * caller's thread. - * @param chunk - * A chunk of binding sets from the upstream producer. - */ - public BindingSetConsumerTask(final Executor executor, - final IBindingSet[] chunk) { + // read the chunk. + final IBindingSet[] chunk = source.next(); - if (chunk == null) - throw new IllegalArgumentException(); - - this.executor = executor; - - this.chunk = chunk; + stats.chunksIn.increment(); + stats.unitsIn.add(chunk.length); - } + if (log.isDebugEnabled()) + log.debug("Read chunk from source: chunkSize=" + + chunk.length + ", joinOp=" + joinOp); - /** - * Read chunks from one or more sources until canceled, interrupted, - * or all sources are exhausted and submits {@link AccessPathTask}s - * to the caller's {@link ExecutorService} -or- executes those tasks - * in the caller's thread if no {@link ExecutorService} was provided - * to the ctor. - * <p> - * Note: When running with an {@link ExecutorService}, the caller is - * responsible for waiting on that {@link ExecutorService} until the - * {@link AccessPathTask}s to complete and must verify all tasks - * completed successfully. 
- * - * @return <code>null</code> - * - * @throws BufferClosedException - * if there is an attempt to output a chunk of - * {@link IBindingSet}s or {@link ISolution}s and the - * output buffer is an {@link IBlockingBuffer} (true for - * all join dimensions exception the lastJoin and also - * true for query on the lastJoin) and that - * {@link IBlockingBuffer} has been closed. - */ + return chunk; + + } + + } + + /* + * Termination condition: the source is exhausted. + */ + + if (log.isDebugEnabled()) + log.debug("Source exhausted: joinOp=" + joinOp); + + return null; + + } + + /** + * Class consumes a chunk of binding set executing a nested indexed join + * until canceled, interrupted, or all the binding sets are exhausted. + * For each {@link IBindingSet} in the chunk, an {@link AccessPathTask} + * is created which will consume that {@link IBindingSet}. The + * {@link AccessPathTask}s are sorted based on their + * <code>fromKey</code> so as to order the execution of those tasks in a + * manner that will maximize the efficiency of index reads. The ordered + * {@link AccessPathTask}s are then submitted to the caller's + * {@link Executor} or run in the caller's thread if the executor is + * <code>null</code>. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + protected class BindingSetConsumerTask implements Callable<Void> { + + private final Executor executor; + private final IBindingSet[] chunk; + + /** + * + * @param executor + * The service that will execute the generated + * {@link AccessPathTask}s -or- <code>null</code> IFF you + * want the {@link AccessPathTask}s to be executed in the + * caller's thread. + * @param chunk + * A chunk of binding sets from the upstream producer. + */ + public BindingSetConsumerTask(final Executor executor, + final IBindingSet[] chunk) { + + if (chunk == null) + throw new IllegalArgumentException(); + + this.executor = executor; + + this.chunk = chunk; + + } + + /** + * Read chunks from one or more sources until canceled, interrupted, + * or all sources are exhausted and submits {@link AccessPathTask}s + * to the caller's {@link ExecutorService} -or- executes those tasks + * in the caller's thread if no {@link ExecutorService} was provided + * to the ctor. + * <p> + * Note: When running with an {@link ExecutorService}, the caller is + * responsible for waiting on that {@link ExecutorService} until the + * {@link AccessPathTask}s to complete and must verify all tasks + * completed successfully. + * + * @return <code>null</code> + * + * @throws BufferClosedException + * if there is an attempt to output a chunk of + * {@link IBindingSet}s or {@link ISolution}s and the + * output buffer is an {@link IBlockingBuffer} (true for + * all join dimensions exception the lastJoin and also + * true for query on the lastJoin) and that + * {@link IBlockingBuffer} has been closed. + */ public Void call() throws Exception { - try { + try { - if (chunk.length == 1) { - - // fast path if the chunk has a single binding set. - runOneTask(); - - return null; - - } + if (chunk.length == 1) { + // fast path if the chunk has a single binding set. + runOneTask(); + + return null; + + } + /* * Generate (and optionally coalesce) the access path tasks. */ @@ -1031,33 +1045,33 @@ /* * Reorder those tasks for better index read performance. */ - reorderTasks(tasks); + reorderTasks(tasks); - /* - * Execute the tasks (either in the caller's thread or on - * the supplied service). 
- */ - executeTasks(tasks); + /* + * Execute the tasks (either in the caller's thread or on + * the supplied service). + */ + executeTasks(tasks); - return null; + return null; - } catch (Throwable t) { + } catch (Throwable t) { - halt(t); + halt(t); - throw new RuntimeException(t); + throw new RuntimeException(t); - } + } - } + } - /** - * There is exactly one {@link IBindingSet} in the chunk, so run - * exactly one {@link AccessPathTask}. - * - * @throws Exception - */ - private void runOneTask() throws Exception { + /** + * There is exactly one {@link IBindingSet} in the chunk, so run + * exactly one {@link AccessPathTask}. + * + * @throws Exception + */ + private void runOneTask() throws Exception { if (chunk.length != 1) throw new AssertionError(); @@ -1094,10 +1108,11 @@ * * @param chunk * The chunk. - * + * * @return The tasks to process that chunk. */ - protected AccessPathTask[] generateAccessPaths(final IBindingSet[] chunk) { + protected AccessPathTask[] generateAccessPaths( + final IBindingSet[] chunk) { final AccessPathTask[] tasks; @@ -1122,7 +1137,7 @@ /* * Do not coalesce access paths. */ - + tasks = new JoinTask.AccessPathTask[chunk.length]; for (int i = 0; i < chunk.length; i++) { @@ -1157,272 +1172,272 @@ } return tasks; - - } - - /** - * Populates a map of asBound predicates paired to a set of - * bindingSets. - * <p> - * Note: The {@link AccessPathTask} will apply each bindingSet to - * each element visited by the {@link IAccessPath} obtained for the - * asBound {@link IPredicate}. This has the natural consequence of - * eliminating subqueries within the chunk. - * - * @param chunk - * A chunk of bindingSets from the source join dimension. - * - * @return A map which pairs the distinct asBound predicates to the - * bindingSets in the chunk from which the predicate was - * generated. - */ - protected Map<HashedPredicate<E>, Collection<IBindingSet>> combineBindingSets( - final IBindingSet[] chunk) { - if (log.isDebugEnabled()) - log.debug("chunkSize=" + chunk.length); + } - final Map<HashedPredicate<E>, Collection<IBindingSet>> map = new LinkedHashMap<HashedPredicate<E>, Collection<IBindingSet>>( - chunk.length); + /** + * Populates a map of asBound predicates paired to a set of + * bindingSets. + * <p> + * Note: The {@link AccessPathTask} will apply each bindingSet to + * each element visited by the {@link IAccessPath} obtained for the + * asBound {@link IPredicate}. This has the natural consequence of + * eliminating subqueries within the chunk. + * + * @param chunk + * A chunk of bindingSets from the source join dimension. + * + * @return A map which pairs the distinct asBound predicates to the + * bindingSets in the chunk from which the predicate was + * generated. + */ + protected Map<HashedPredicate<E>, Collection<IBindingSet>> combineBindingSets( + final IBindingSet[] chunk) { - for (IBindingSet bindingSet : chunk) { + if (log.isDebugEnabled()) + log.debug("chunkSize=" + chunk.length); - halted(); + final Map<HashedPredicate<E>, Collection<IBindingSet>> map = new LinkedHashMap<HashedPredicate<E>, Collection<IBindingSet>>( + chunk.length); - // constrain the predicate to the given bindings. - IPredicate<E> asBound = predicate.asBound(bindingSet); + for (IBindingSet bindingSet : chunk) { - if (partitionId != -1) { + halted(); - /* - * Constrain the predicate to the desired index - * partition. 
- * - * Note: we do this for scale-out joins since the access - * path will be evaluated by a JoinTask dedicated to - * this index partition, which is part of how we give - * the JoinTask to gain access to the local index object - * for an index partition. - */ + // constrain the predicate to the given bindings. + IPredicate<E> asBound = predicate.asBound(bindingSet); - asBound = asBound.setPartitionId(partitionId); + if (partitionId != -1) { - } + /* + * Constrain the predicate to the desired index + * partition. + * + * Note: we do this for scale-out joins since the access + * path will be evaluated by a JoinTask dedicated to + * this index partition, which is part of how we give + * the JoinTask to gain access to the local index object + * for an index partition. + */ - // lookup the asBound predicate in the map. - final HashedPredicate<E> hashedPred = new HashedPredicate<E>(asBound); - Collection<IBindingSet> values = map.get(hashedPred); + asBound = asBound.setPartitionId(partitionId); - if (values == null) { + } - /* - * This is the first bindingSet for this asBound - * predicate. We create a collection of bindingSets to - * be paired with that predicate and put the collection - * into the map using that predicate as the key. - */ + // lookup the asBound predicate in the map. + final HashedPredicate<E> hashedPred = new HashedPredicate<E>( + asBound); + Collection<IBindingSet> values = map.get(hashedPred); - values = new LinkedList<IBindingSet>(); + if (values == null) { - map.put(hashedPred, values); + /* + * This is the first bindingSet for this asBound + * predicate. We create a collection of bindingSets to + * be paired with that predicate and put the collection + * into the map using that predicate as the key. + */ - } else { + values = new LinkedList<IBindingSet>(); - // more than one bindingSet will use the same access - // path. - stats.accessPathDups.increment(); + map.put(hashedPred, values); - } + } else { - /* - * Add the bindingSet to the collection of bindingSets - * paired with the asBound predicate. - */ + // more than one bindingSet will use the same access + // path. + stats.accessPathDups.increment(); - values.add(bindingSet); + } - } + /* + * Add the bindingSet to the collection of bindingSets + * paired with the asBound predicate. + */ - if (log.isDebugEnabled()) - log.debug("chunkSize=" + chunk.length - + ", #distinct predicates=" + map.size()); + values.add(bindingSet); - return map; + } - } + if (log.isDebugEnabled()) + log.debug("chunkSize=" + chunk.length + + ", #distinct predicates=" + map.size()); - /** - * Creates an {@link AccessPathTask} for each {@link IBindingSet} in - * the given chunk. - * - * @param chunk - * A chunk of {@link IBindingSet}s from one or more - * source {@link JoinTask}s. - * - * @return A chunk of {@link AccessPathTask} in a desirable - * execution order. - * - * @throws Exception - */ - protected AccessPathTask[] getAccessPathTasks( - final Map<HashedPredicate<E>, Collection<IBindingSet>> map) { + return map; - final int n = map.size(); + } - if (log.isDebugEnabled()) - log.debug("#distinct predicates=" + n); + /** + * Creates an {@link AccessPathTask} for each {@link IBindingSet} in + * the given chunk. + * + * @param chunk + * A chunk of {@link IBindingSet}s from one or more + * source {@link JoinTask}s. + * + * @return A chunk of {@link AccessPathTask} in a desirable + * execution order. 
+ * + * @throws Exception + */ + protected AccessPathTask[] getAccessPathTasks( + final Map<HashedPredicate<E>, Collection<IBindingSet>> map) { - final AccessPathTask[] tasks = new JoinTask.AccessPathTask[n]; + final int n = map.size(); - final Iterator<Map.Entry<HashedPredicate<E>, Collection<IBindingSet>>> itr ... [truncated message content] |
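The combineBindingSets() logic in the (truncated) diff above coalesces duplicate access paths: binding sets whose as-bound predicates are equal are paired with a single predicate, so each distinct access path is read only once per chunk. A minimal standalone sketch of that idea follows; Predicate, BindingSet, and asBound() below are simplified stand-ins invented for illustration, not bigdata's real IPredicate/IBindingSet API.

import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Standalone sketch of the access-path coalescing performed by
// combineBindingSets() in the diff above: binding sets whose as-bound
// predicates are equal are grouped so that each distinct access path is
// read only once. Predicate and BindingSet are simplified stand-ins.
public class CoalesceSketch {

    // Stand-in for an as-bound predicate; record equality decides
    // coalescing, playing the role of HashedPredicate in the real code.
    record Predicate(String s, String p, String o) {}

    // Stand-in for a binding set.
    record BindingSet(Map<String, String> bindings) {}

    // Constrain the pattern (?s knows ?o) to the given bindings.
    static Predicate asBound(final BindingSet bs) {
        return new Predicate(
                bs.bindings().getOrDefault("s", "?s"),
                "knows",
                bs.bindings().getOrDefault("o", "?o"));
    }

    public static void main(String[] args) {
        final List<BindingSet> chunk = List.of(
                new BindingSet(Map.of("s", "alice")),
                new BindingSet(Map.of("s", "alice")), // duplicate access path
                new BindingSet(Map.of("s", "bob")));

        // Pair each distinct as-bound predicate with the binding sets that
        // produced it, preserving first-seen order as the LinkedHashMap in
        // the real combineBindingSets() does.
        final Map<Predicate, Collection<BindingSet>> map =
                new LinkedHashMap<>(chunk.size());
        for (BindingSet bs : chunk) {
            map.computeIfAbsent(asBound(bs), k -> new ArrayList<>()).add(bs);
        }

        // One access-path read per distinct predicate; the second "alice"
        // binding set rides along rather than triggering a second read.
        map.forEach((pred, sets) -> System.out.println(
                pred + " -> " + sets.size() + " binding set(s)"));
    }
}

The duplicate "alice" binding set is recorded against the first read rather than causing a second one, which is what the accessPathDups counter tracks in the real code.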
From: <tho...@us...> - 2011-01-05 13:45:29
Revision: 4052
http://bigdata.svn.sourceforge.net/bigdata/?rev=4052&view=rev
Author: thompsonbry
Date: 2011-01-05 13:45:21 +0000 (Wed, 05 Jan 2011)

Log Message:
-----------
Restored run against local U50 instance.

Modified Paths:
--------------
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/TestJoinGraphOnLubm.java

Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/TestJoinGraphOnLubm.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/TestJoinGraphOnLubm.java 2011-01-05 13:44:51 UTC (rev 4051)
+++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/bop/rdf/joinGraph/TestJoinGraphOnLubm.java 2011-01-05 13:45:21 UTC (rev 4052)
@@ -230,7 +230,7 @@
 final Properties properties = getProperties();
 final File file;
- if (true) {
+ if (false) {
 /*
 * Use a persistent file that is generated once and then reused by
 * each test run.
@@ -244,7 +244,7 @@
 /*
 * Use a specific file generated by some external process.
 */
- final int nuniv = 1000;
+ final int nuniv = 50;
 file = new File("/data/lubm/U" + nuniv + "/bigdata-lubm.WORM.jnl");
 namespace = "LUBM_U" + nuniv;
 }
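The commit flips a hard-coded if(true)/if(false) toggle by hand between runs. Purely as an illustrative alternative, not what the commit does, the same choice could be driven by system properties; the property names "lubm.nuniv" and "lubm.generate" and the generated-file name below are invented for this sketch.

import java.io.File;

// Hypothetical sketch (not part of the commit): select the LUBM journal
// via system properties instead of hand-editing the source toggle.
public class LubmJournalConfig {
    public static void main(String[] args) {
        final int nuniv = Integer.getInteger("lubm.nuniv", 50);
        final File file = Boolean.getBoolean("lubm.generate")
                // a persistent file generated once and then reused by each run
                ? new File("bigdata-lubm-generated.jnl")
                // a journal produced by some external process, as in the commit
                : new File("/data/lubm/U" + nuniv + "/bigdata-lubm.WORM.jnl");
        final String namespace = "LUBM_U" + nuniv;
        System.out.println(namespace + " -> " + file);
    }
}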
From: <tho...@us...> - 2011-01-05 13:44:58
Revision: 4051
http://bigdata.svn.sourceforge.net/bigdata/?rev=4051&view=rev
Author: thompsonbry
Date: 2011-01-05 13:44:51 +0000 (Wed, 05 Jan 2011)

Log Message:
-----------
javadoc edit

Modified Paths:
--------------
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2011-01-04 00:24:31 UTC (rev 4050)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2011-01-05 13:44:51 UTC (rev 4051)
@@ -67,8 +67,8 @@
 /**
 * Log rule execution statistics.
 *
- * @param stats
- * The rule execution statistics.
+ * @param q
+ * The running query.
 *
 * @todo need start and end time for the query.
 */
From: <tho...@us...> - 2011-01-04 00:24:38
Revision: 4050 http://bigdata.svn.sourceforge.net/bigdata/?rev=4050&view=rev Author: thompsonbry Date: 2011-01-04 00:24:31 +0000 (Tue, 04 Jan 2011) Log Message: ----------- Deleted two support classes associated with the push()/pop() ibinding set api. Removed Paths: ------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/SinkTransitionBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/SinkTransitionMetadata.java Deleted: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/SinkTransitionBuffer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/SinkTransitionBuffer.java 2011-01-04 00:23:40 UTC (rev 4049) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/SinkTransitionBuffer.java 2011-01-04 00:24:31 UTC (rev 4050) @@ -1,106 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Jan 1st, 2011 - */ -package com.bigdata.bop.engine; - -import java.util.concurrent.Future; - -import com.bigdata.bop.IBindingSet; -import com.bigdata.relation.accesspath.IAsynchronousIterator; -import com.bigdata.relation.accesspath.IBlockingBuffer; - -/** - * Delegation pattern handles the {@link SinkTransitionMetadata}. 
- * - * @deprecated along with {@link SinkTransitionMetadata} - */ -class SinkTransitionBuffer implements - IBlockingBuffer<IBindingSet[]> { - - private final IBlockingBuffer<IBindingSet[]> b; - - private final SinkTransitionMetadata stm; - - /** - * - */ - public SinkTransitionBuffer(final IBlockingBuffer<IBindingSet[]> b, - final SinkTransitionMetadata stm) { - - this.b = b; - - this.stm = stm; - - } - - public IAsynchronousIterator<IBindingSet[]> iterator() { - return b.iterator(); - } - - public void setFuture(final Future future) { - b.setFuture(future); - } - - public void abort(final Throwable cause) { - b.abort(cause); - } - - public void close() { - b.close(); - } - - public Future getFuture() { - return b.getFuture(); - } - - public boolean isOpen() { - return b.isOpen(); - } - - public long flush() { - return b.flush(); - } - - public void add(final IBindingSet[] e) { - for (IBindingSet bset : e) { - stm.handleBindingSet(bset); - } - b.add(e); - } - - public boolean isEmpty() { - return b.isEmpty(); - } - - public void reset() { - b.reset(); - } - - public int size() { - return b.size(); - } - -} \ No newline at end of file Deleted: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/SinkTransitionMetadata.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/SinkTransitionMetadata.java 2011-01-04 00:23:40 UTC (rev 4049) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/SinkTransitionMetadata.java 2011-01-04 00:24:31 UTC (rev 4050) @@ -1,115 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -/* - * Created on Dec 31, 2010 - */ -package com.bigdata.bop.engine; - -import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.PipelineOp; - -/** - * In order to setup the push/pop of the sink and altSink we need to specify - * certain metadata about the source groupId, the target groupId, and whether - * the transition is via the sink or the altSink. The groupId for the source and - * target operators MAY be null, in which case the operator is understood to be - * outside of any conditional binding group. - * <p> - * The action to be taken when the binding set is written to the sink or the - * altSink is determined by a simple decision matrix. - * - * <pre> - * | toGroup - * fromGroup + null + newGroup + sameGroup - * null | NOP | Push | n/a - * group | Pop | Pop+Push | NOP - * </pre> - * - * The value of the [boolean:save] flag for pop is decided based on whether the - * transition is via the default sink (save:=true) or the altSink (save:=false). - * - * @see PipelineOp.Annotations#CONDITIONAL_GROUP - * - * @todo Unit tests of this class in isolation. 
- *
- * @deprecated It appears that this design can not be made to satisfy SPARQL
- * optional group semantics. Therefore, we may be able to drop this
- * class, support for it in the {@link ChunkedRunningQuery} and
- * support for the symbol table stack in {@link IBindingSet}.
- */
-class SinkTransitionMetadata {
-
- private final Integer fromGroupId;
-
- private final Integer toGroupId;
-
- private final boolean isSink;
-
- public String toString() {
-
- return getClass().getSimpleName() + "{from=" + fromGroupId + ",to="
- + toGroupId + ",isSink=" + isSink + "}";
-
- }
-
- public SinkTransitionMetadata(final Integer fromGroupId,
- final Integer toGroupId, final boolean isSink) {
-
- this.fromGroupId = fromGroupId;
-
- this.toGroupId = toGroupId;
-
- this.isSink = isSink;
-
- }
-
- /**
- * Apply the appropriate action(s) to the binding set.
- *
- * @param bset
- * The binding set.
- */
- public void handleBindingSet(final IBindingSet bset) {
-// if (fromGroupId == null) {
-// if (toGroupId == null)
-// return;
-// // Transition from no group to some group.
-// bset.push();
-// return;
-// } else {
-// if (toGroupId == null)
-// // Transition from a group to no group.
-// bset.pop(isSink/* save */);
-// else if (toGroupId.equals(fromGroupId)) {
-// // NOP (transition to the same group)
-// } else {
-// // Transition to a different group.
-// bset.pop(isSink/* save */);
-// bset.push();
-// }
-// }
- throw new UnsupportedOperationException();
- }
-
-}
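For reference, the decision matrix documented by the deleted SinkTransitionMetadata class can be reconstructed from its commented-out handleBindingSet() body. A minimal sketch follows, assuming a hypothetical BindingSetStack stand-in for the push()/pop(boolean) methods that this change removes from IBindingSet.

import java.util.ArrayList;
import java.util.List;

// Sketch of the group-transition decision matrix from the deleted class,
// taken from its commented-out handleBindingSet() body. BindingSetStack
// is a hypothetical stand-in for the removed push()/pop(boolean) API.
public class SinkTransitionSketch {

    interface BindingSetStack {
        void push();             // enter a conditional binding group
        void pop(boolean save);  // leave a group; save==true via the default sink
    }

    static void handle(final Integer fromGroupId, final Integer toGroupId,
            final boolean isSink, final BindingSetStack bset) {
        if (fromGroupId == null) {
            if (toGroupId == null)
                return;                 // null -> null : NOP
            bset.push();                // null -> group : Push
        } else if (toGroupId == null) {
            bset.pop(isSink);           // group -> null : Pop
        } else if (toGroupId.equals(fromGroupId)) {
            // group -> same group : NOP
        } else {
            bset.pop(isSink);           // group -> other group : Pop+Push
            bset.push();
        }
    }

    public static void main(String[] args) {
        final List<String> trace = new ArrayList<>();
        final BindingSetStack bset = new BindingSetStack() {
            public void push() { trace.add("push"); }
            public void pop(boolean save) { trace.add("pop(save=" + save + ")"); }
        };
        handle(null, 1, true, bset);   // enter group 1
        handle(1, 2, true, bset);      // cross into group 2
        handle(2, null, false, bset);  // exit via the altSink
        System.out.println(trace);     // [push, pop(save=true), push, pop(save=false)]
    }
}

The save flag mirrors the rule in the class javadoc above: pop(true) when the transition is via the default sink, pop(false) when it is via the altSink.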