This list is closed, nobody may subscribe to it.
2010 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
(139) |
Aug
(94) |
Sep
(232) |
Oct
(143) |
Nov
(138) |
Dec
(55) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
2011 |
Jan
(127) |
Feb
(90) |
Mar
(101) |
Apr
(74) |
May
(148) |
Jun
(241) |
Jul
(169) |
Aug
(121) |
Sep
(157) |
Oct
(199) |
Nov
(281) |
Dec
(75) |
2012 |
Jan
(107) |
Feb
(122) |
Mar
(184) |
Apr
(73) |
May
(14) |
Jun
(49) |
Jul
(26) |
Aug
(103) |
Sep
(133) |
Oct
(61) |
Nov
(51) |
Dec
(55) |
2013 |
Jan
(59) |
Feb
(72) |
Mar
(99) |
Apr
(62) |
May
(92) |
Jun
(19) |
Jul
(31) |
Aug
(138) |
Sep
(47) |
Oct
(83) |
Nov
(95) |
Dec
(111) |
2014 |
Jan
(125) |
Feb
(60) |
Mar
(119) |
Apr
(136) |
May
(270) |
Jun
(83) |
Jul
(88) |
Aug
(30) |
Sep
(47) |
Oct
(27) |
Nov
(23) |
Dec
|
2015 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(3) |
Oct
|
Nov
|
Dec
|
2016 |
Jan
|
Feb
|
Mar
(4) |
Apr
(1) |
May
|
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
From: <mrp...@us...> - 2010-09-02 22:24:28
|
Revision: 3499 http://bigdata.svn.sourceforge.net/bigdata/?rev=3499&view=rev Author: mrpersonick Date: 2010-09-02 22:24:22 +0000 (Thu, 02 Sep 2010) Log Message: ----------- added jini jars to Sesame Server install Modified Paths: -------------- trunk/build.xml Modified: trunk/build.xml =================================================================== --- trunk/build.xml 2010-09-02 20:42:43 UTC (rev 3498) +++ trunk/build.xml 2010-09-02 22:24:22 UTC (rev 3499) @@ -1992,6 +1992,10 @@ <fileset dir="${bigdata.dir}/bigdata/lib"> <include name="**/*.jar" /> </fileset> + <fileset dir="${bigdata.dir}/bigdata-jini/lib/jini/lib"> + <include name="jini-core.jar" /> + <include name="jini-ext.jar" /> + </fileset> </copy> <!-- copy resources to Workbench webapp. --> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-02 20:42:50
|
Revision: 3498 http://bigdata.svn.sourceforge.net/bigdata/?rev=3498&view=rev Author: thompsonbry Date: 2010-09-02 20:42:43 +0000 (Thu, 02 Sep 2010) Log Message: ----------- Added IKeyOrder#getKey(IKeyBuilder,E element) to format a key from an element. Added support and unit tests for optional pipeline joins in which the alternative sink is specified. This is to support jumping out of an optional join group. (Note that the star join does not currently support this feature and does not have a unit test for this feature). Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/AbstractRelation.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/IRelation.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/RelationFusedView.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractKeyOrder.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/IKeyOrder.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/ap/R.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconKeyOrder.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicKeyOrder.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOKeyOrder.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -65,8 +65,11 @@ /** * Default for {@link #CHUNK_OF_CHUNKS_CAPACITY} + * + * @todo was 100. dialed down to reduce heap consumption for arrays. + * test performance @ 100 and 1000. */ - int DEFAULT_CHUNK_OF_CHUNKS_CAPACITY = 1000; + int DEFAULT_CHUNK_OF_CHUNKS_CAPACITY = 100; /** * Sets the capacity of the {@link IBuffer}s used to accumulate a chunk Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -87,7 +87,8 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ * - * @todo Break the star join logic out into its own join operator. + * @todo Break the star join logic out into its own join operator and test + * suite. */ public class PipelineJoin extends BindingSetPipelineOp { @@ -324,12 +325,12 @@ /** * The join that is being executed. */ - final protected PipelineJoin joinOp; + final private PipelineJoin joinOp; /** * The constraint (if any) specified for the join operator. 
*/ - final IConstraint[] constraints; + final private IConstraint[] constraints; /** * The maximum parallelism with which the {@link JoinTask} will @@ -337,101 +338,59 @@ * * @see Annotations#MAX_PARALLEL */ - final int maxParallel; + final private int maxParallel; /** * The service used for executing subtasks (optional). * * @see #maxParallel */ - final Executor service; + final private Executor service; /** * True iff the {@link #right} operand is an optional pattern (aka if * this is a SPARQL style left join). */ - final boolean optional; + final private boolean optional; /** - * The alternative sink to use when the join is {@link #optional} but - * the failed joined needs to jump out of a join group rather than - * routing directly to the ancestor in the operator tree. - * - * FIXME Support for the {@link #optionalSink} is not finished. When the - * optional target is not simply the direct ancestor in the operator - * tree then we need to have a separate thread local buffer in front of - * the optional sink for the join task. This means that we need to use - * two {@link #threadLocalBufferFactory}s, one for the default sink and - * one for the alternative sink. All of this only matters when the - * binding sets are being routed out of an optional join group. When the - * tails are independent optionals then the target is the same as the - * target for binding sets which do join. - */ - final IBlockingBuffer<IBindingSet[]> optionalSink; - - /** * The variables to be retained by the join operator. Variables not * appearing in this list will be stripped before writing out the - * binding set onto the {@link #sink}. + * binding set onto the output sink(s). */ - final IVariable<?>[] variablesToKeep; + final private IVariable<?>[] variablesToKeep; - /** - * The source for the binding sets. - */ - final BindingSetPipelineOp left; +// /** +// * The source for the binding sets. +// */ +// final BindingSetPipelineOp left; /** * The source for the elements to be joined. */ - final IPredicate<?> right; + final private IPredicate<?> right; /** * The relation associated with the {@link #right} operand. */ - final IRelation<?> relation; + final private IRelation<?> relation; /** * The partition identifier -or- <code>-1</code> if we are not reading * on an index partition. */ - final int partitionId; + final private int partitionId; /** * The evaluation context. */ - final protected BOpContext<IBindingSet> context; + final private BOpContext<IBindingSet> context; /** * The statistics for this {@link JoinTask}. */ - final PipelineJoinStats stats; + final private PipelineJoinStats stats; - final private ThreadLocalBufferFactory<AbstractUnsynchronizedArrayBuffer<IBindingSet>, IBindingSet> threadLocalBufferFactory = new ThreadLocalBufferFactory<AbstractUnsynchronizedArrayBuffer<IBindingSet>, IBindingSet>() { - - @Override - protected AbstractUnsynchronizedArrayBuffer<IBindingSet> initialValue() { - - // new buffer created by the concrete JoinClass impl. - return newUnsyncOutputBuffer(); - - } - - @Override - protected void halted() { - - JoinTask.this.halted(); - - } - - }; - - public String toString() { - - return getClass().getName() + "{ joinOp=" + joinOp + "}"; - - } - /** * The source from which we read the binding set chunks. * <p> @@ -459,17 +418,36 @@ final private IBlockingBuffer<IBindingSet[]> sink; /** + * The alternative sink to use when the join is {@link #optional} AND + * {@link BOpContext#getSink2()} returns a distinct buffer for the + * alternative sink. 
The binding sets from the source are copied onto the + * alternative sink for an optional join if the join fails. Normally the + * {@link BOpContext#getSink()} can be used for both the joins which + * succeed and those which fail. The alternative sink is only necessary + * when the failed join needs to jump out of a join group rather than + * routing directly to the ancestor in the operator tree. + */ + final private IBlockingBuffer<IBindingSet[]> sink2; + + /** + * The thread-local buffer factory for the default sink. + */ + final private TLBFactory threadLocalBufferFactory; + + /** + * The thread-local buffer factory for the optional sink (iff the + * optional sink is defined). + */ + final private TLBFactory threadLocalBufferFactory2; + + /** * Instances of this class MUST be created in the appropriate execution * context of the target {@link DataService} so that the federation and * the joinNexus references are both correct and so that it has access * to the local index object for the specified index partition. * * @param joinOp - * @param joinNexus - * @param sink - * The sink on which the {@link IBindingSet} chunks are - * written. - * @param requiredVars + * @param context */ public JoinTask(// final PipelineJoin joinOp,// @@ -483,7 +461,7 @@ // this.fed = context.getFederation(); this.joinOp = joinOp; - this.left = joinOp.left(); +// this.left = joinOp.left(); this.right = joinOp.right(); this.constraints = joinOp.constraints(); this.maxParallel = joinOp.getMaxParallel(); @@ -506,15 +484,26 @@ this.relation = context.getReadRelation(right); this.source = context.getSource(); this.sink = context.getSink(); - this.optionalSink = context.getSink2(); + this.sink2 = context.getSink2(); this.partitionId = context.getPartitionId(); this.stats = (PipelineJoinStats) context.getStats(); + this.threadLocalBufferFactory = new TLBFactory(sink); + + this.threadLocalBufferFactory2 = sink2 == null ? null + : new TLBFactory(sink2); + if (log.isDebugEnabled()) log.debug("joinOp=" + joinOp); } + public String toString() { + + return getClass().getName() + "{ joinOp=" + joinOp + "}"; + + } + /** * Runs the {@link JoinTask}. * @@ -536,6 +525,8 @@ * Flush and close the thread-local output buffers. */ threadLocalBufferFactory.flush(); + if (threadLocalBufferFactory2 != null) + threadLocalBufferFactory2.flush(); // flush the sync buffer flushAndCloseBuffersAndAwaitSinks(); @@ -560,6 +551,8 @@ try { // resetUnsyncBuffers(); threadLocalBufferFactory.reset(); + if (threadLocalBufferFactory2 != null) + threadLocalBufferFactory2.reset(); } catch (Throwable t2) { log.error(t2.getLocalizedMessage(), t2); } @@ -642,27 +635,6 @@ } /** - * A method used by the {@link #threadLocalBufferFactory} to create new - * output buffer as required. The output buffer will be used to - * aggregate {@link IBindingSet}s generated by this {@link JoinTask}. - */ - final protected AbstractUnsynchronizedArrayBuffer<IBindingSet> newUnsyncOutputBuffer() { - - /* - * The index is not key-range partitioned. This means that there is - * ONE (1) JoinTask per predicate in the rule. The bindingSets are - * aggregated into chunks by this buffer. On overflow, the buffer - * writes onto a BlockingBuffer. The sink JoinTask reads from that - * BlockingBuffer's iterator. - */ - - // flushes to the syncBuffer. - return new UnsyncLocalOutputBuffer<IBindingSet>(stats, joinOp - .getChunkCapacity(), sink); - - } - - /** * Flush and close all output buffers and await sink {@link JoinTask} * (s). 
* <p> @@ -694,6 +666,11 @@ sink.flush(); sink.close(); + if(sink2!=null) { + sink2.flush(); + sink2.close(); + } + } /** @@ -709,7 +686,19 @@ if (sink.getFuture() != null) { sink.getFuture().cancel(true/* mayInterruptIfRunning */); + + } + + if (sink2 != null) { + sink2.reset(); + + if (sink2.getFuture() != null) { + + sink2.getFuture().cancel(true/* mayInterruptIfRunning */); + + } + } } @@ -1300,6 +1289,10 @@ final AbstractUnsynchronizedArrayBuffer<IBindingSet> unsyncBuffer = threadLocalBufferFactory .get(); + // Thread-local buffer iff optional sink is in use. + final AbstractUnsynchronizedArrayBuffer<IBindingSet> unsyncBuffer2 = threadLocalBufferFactory2 == null ? null + : threadLocalBufferFactory2.get(); + while (itr.hasNext()) { final Object[] chunk = itr.nextChunk(); @@ -1329,7 +1322,13 @@ for (IBindingSet bs : this.bindingSets) { - unsyncBuffer.add(bs); + if (unsyncBuffer2 == null) { + // use the default sink. + unsyncBuffer.add(bs); + } else { + // use the alternative sink. + unsyncBuffer2.add(bs); + } } @@ -1358,6 +1357,10 @@ final IStarJoin starJoin = (IStarJoin) accessPath .getPredicate(); + /* + * FIXME The star join does not handle the alternative sink yet. + * See the ChunkTask for the normal join. + */ final AbstractUnsynchronizedArrayBuffer<IBindingSet> unsyncBuffer = threadLocalBufferFactory .get(); @@ -1727,6 +1730,54 @@ }// class ChunkTask + /** + * Concrete implementation with hooks to halt a join. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + private class TLBFactory + extends + ThreadLocalBufferFactory<AbstractUnsynchronizedArrayBuffer<IBindingSet>, IBindingSet> { + + final private IBlockingBuffer<IBindingSet[]> sink; + + /** + * + * @param sink + * The thread-safe buffer onto which the thread-local + * buffer overflow. + */ + public TLBFactory(final IBlockingBuffer<IBindingSet[]> sink) { + + if (sink == null) + throw new IllegalArgumentException(); + + this.sink = sink; + + } + + @Override + protected AbstractUnsynchronizedArrayBuffer<IBindingSet> initialValue() { + + /* + * Wrap the buffer provider to the constructor with a thread + * local buffer. + */ + + return new UnsyncLocalOutputBuffer<IBindingSet>(stats, joinOp + .getChunkCapacity(), sink); + + } + + @Override + protected void halted() { + + JoinTask.this.halted(); + + } + + } // class TLBFactory + }// class JoinTask } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/AbstractRelation.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/AbstractRelation.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/AbstractRelation.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -209,41 +209,13 @@ } /** - * This handles a request for an access path that is restricted to a - * specific index partition. + * {@inheritDoc} * <p> - * Note: This path is used with the scale-out JOIN strategy, which - * distributes join tasks onto each index partition from which it needs to - * read. Those tasks constrain the predicate to only read from the index - * partition which is being serviced by that join task. 
- * <p> * Note: Since the relation may materialize the index views for its various * access paths, and since we are restricted to a single index partition and * (presumably) an index manager that only sees the index partitions local * to a specific data service, we create an access path view for an index * partition without forcing the relation to be materialized. - * <p> - * Note: Expanders ARE NOT applied in this code path. Expanders require a - * total view of the relation, which is not available during scale-out - * pipeline joins. - * - * @param indexManager - * This MUST be the data service local index manager so that the - * returned access path will read against the local shard. - * @param predicate - * The predicate. {@link IPredicate#getPartitionId()} MUST return - * a valid index partition identifier. - * - * @throws IllegalArgumentException - * if either argument is <code>null</code>. - * @throws IllegalArgumentException - * unless the {@link IIndexManager} is a <em>local</em> index - * manager providing direct access to the specified shard. - * @throws IllegalArgumentException - * unless the predicate identifies a specific shard using - * {@link IPredicate#getPartitionId()}. - * - * @todo Raise this method into the {@link IRelation} interface. */ public IAccessPath<E> getAccessPathForIndexPartition( final IIndexManager indexManager, // Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/IRelation.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/IRelation.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/IRelation.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -175,13 +175,35 @@ */ IAccessPath<E> getAccessPath(IPredicate<E> predicate); - /* - * @todo raise this method into this interface. it is currently implemented - * by AbstractRelation and overridden by SPORelation to handle the different - * index families for triples versus quads. + /** + * This handles a request for an access path that is restricted to a + * specific index partition. This access path is used with the scale-out + * JOIN strategy, which distributes join tasks onto each index partition + * from which it needs to read. Those tasks constrain the predicate to only + * read from the index partition which is being serviced by that join task. + * <p> + * Note: Expanders ARE NOT applied in this code path. Expanders require a + * total view of the relation, which is not available during scale-out + * pipeline joins. + * + * @param indexManager + * This MUST be the data service local index manager so that the + * returned access path will read against the local shard. + * @param predicate + * The predicate. {@link IPredicate#getPartitionId()} MUST return + * a valid index partition identifier. + * + * @throws IllegalArgumentException + * if either argument is <code>null</code>. + * @throws IllegalArgumentException + * unless the {@link IIndexManager} is a <em>local</em> index + * manager providing direct access to the specified shard. + * @throws IllegalArgumentException + * unless the predicate identifies a specific shard using + * {@link IPredicate#getPartitionId()}. */ -// IAccessPath<E> getAccessPathForIndexPartition(IIndexManager indexManager, IPredicate<E> predicate); - + IAccessPath<E> getAccessPathForIndexPartition(IIndexManager indexManager, + IPredicate<E> predicate); /** * The fully qualified name of the index. 
* Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/RelationFusedView.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/RelationFusedView.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/RelationFusedView.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -208,4 +208,9 @@ throw new UnsupportedOperationException(); } + public IAccessPath<E> getAccessPathForIndexPartition( + IIndexManager indexManager, IPredicate<E> predicate) { + throw new UnsupportedOperationException(); + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractKeyOrder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractKeyOrder.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractKeyOrder.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -27,6 +27,7 @@ package com.bigdata.striterator; +import com.bigdata.bop.IElement; import com.bigdata.bop.IPredicate; import com.bigdata.bop.IVariableOrConstant; import com.bigdata.btree.keys.IKeyBuilder; @@ -42,13 +43,36 @@ abstract public class AbstractKeyOrder<E> implements IKeyOrder<E> { /** - * This implementation should work fine unless you need to override the - * manner in which a bound value in the {@link IPredicate} is converted into - * a key. - * <p> * {@inheritDoc} + * + * @todo While you can override + * {@link #appendKeyComponent(IKeyBuilder, int, Object)} to use a + * different encoding, this does not really let you handle something + * which does not implement {@link IElement} without overriding + * {@link #getKey(IKeyBuilder, Object)} as well. */ - public byte[] getFromKey(final IKeyBuilder keyBuilder, + public byte[] getKey(final IKeyBuilder keyBuilder, final E element) { + + keyBuilder.reset(); + + final int keyArity = getKeyArity(); // use the key's "arity". + + for (int i = 0; i < keyArity; i++) { + + /* + * Note: If you need to override the default IKeyBuilder behavior do + * it in the invoked method. + */ + appendKeyComponent(keyBuilder, i, ((IElement) element) + .get(getKeyOrder(i))); + + } + + return keyBuilder.getKey(); + + } + + final public byte[] getFromKey(final IKeyBuilder keyBuilder, final IPredicate<E> predicate) { keyBuilder.reset(); @@ -67,7 +91,7 @@ /* * Note: If you need to override the default IKeyBuilder behavior do - * it here. + * it in the invoked method. */ appendKeyComponent(keyBuilder, i, term.get()); @@ -79,6 +103,15 @@ } + final public byte[] getToKey(final IKeyBuilder keyBuilder, + final IPredicate<E> predicate) { + + final byte[] from = getFromKey(keyBuilder, predicate); + + return from == null ? null : SuccessorUtil.successor(from); + + } + /** * Encodes an value into the key. This implementation uses the default * behavior of {@link IKeyBuilder}. If you need to specialize how a value @@ -90,14 +123,5 @@ keyBuilder.append(keyComponent); } - - public byte[] getToKey(final IKeyBuilder keyBuilder, - final IPredicate<E> predicate) { - final byte[] from = getFromKey(keyBuilder, predicate); - - return from == null ? 
null : SuccessorUtil.successor(from); - - } - } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/IKeyOrder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/IKeyOrder.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/IKeyOrder.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -73,16 +73,44 @@ /* * New methods. */ - + /** + * Return the key for an element of the relation. + * + * @param keyBuilder + * The object which will be used to construct the key. + * @param element + * An element for the associated relation. + * + * @return The key for the index associated with this {@link IKeyOrder}. + */ + byte[] getKey(IKeyBuilder keyBuilder, E element); + + /** * Return the inclusive lower bound which would be used for a query against * this {@link IKeyOrder} for the given {@link IPredicate}. + * + * @param keyBuilder + * The object which will be used to construct the key. + * @param predicate + * A predicate describing bound and unbound fields for the key. + * + * @return The key corresponding to the inclusive lower bound for a query + * against that {@link IPredicate}. */ byte[] getFromKey(IKeyBuilder keyBuilder, IPredicate<E> predicate); /** * Return the exclusive upper bound which would be used for a query against * this {@link IKeyOrder} for the given {@link IPredicate}. + * + * @param keyBuilder + * The object which will be used to construct the key. + * @param predicate + * A predicate describing bound and unbound fields for the key. + * + * @return The key corresponding to the exclusive upper bound for a query + * against that {@link IPredicate}. */ byte[] getToKey(IKeyBuilder keyBuilder, IPredicate<E> predicate); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/ap/R.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/ap/R.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/ap/R.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -181,9 +181,7 @@ final E e = itr.next(); - // @todo this is not declarative! - final byte[] key = keyBuilder.reset().append(e.name) - .append(e.value).getKey(); + final byte[] key = primaryKeyOrder.getKey(keyBuilder, e); if (!ndx.contains(key)) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -58,6 +58,7 @@ import com.bigdata.bop.NoSuchBOpException; import com.bigdata.bop.aggregation.Union; import com.bigdata.bop.ap.Predicate; +import com.bigdata.bop.ap.R; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.relation.accesspath.IElementFilter; @@ -189,9 +190,6 @@ * <p> * This is guarded by the {@link #runningStateLock}. * - * FIXME Declarative generation of a key for an index from an element (see - * {@link R}). 
- * * FIXME Unit tests for non-distinct {@link IElementFilter}s on an * {@link IPredicate}, unit tests for distinct element filter on an * {@link IPredicate} which is capable of distributed operations, handling Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -590,6 +590,10 @@ for (int i = 0; i < e.length; i++) { if (log.isInfoEnabled()) log.info(n + " : " + e[i]); + if (n >= expected.length) { + fail("Willing to deliver too many solutions: n=" + n + + " : " + e[i]); + } if (!expected[n].equals(e[i])) { fail("n=" + n + ", expected=" + expected[n] + ", actual=" + e[i]); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -74,9 +74,6 @@ * source binding sets or in the access path. Joins are pretty quick so it * is really difficult to test this outside of a stress test. BSBM is a * good way to validate this (run the qualification trials). - * - * @todo Write unit tests for star-joins (in their own test suite and ideally - * factor them out from the standard {@link PipelineJoin} operator). */ public class TestPipelineJoin extends TestCase2 { @@ -483,8 +480,6 @@ * * @throws ExecutionException * @throws InterruptedException - * - * @todo test w/ and w/o the alternative sink. */ public void test_optionalJoin() throws InterruptedException, ExecutionException { @@ -593,5 +588,128 @@ ft.get(); // verify nothing thrown. } + + /** + * Unit test for an optional {@link PipelineJoin} when the + * {@link BOpContext#getSink2() alternative sink} is specified. + * + * @throws InterruptedException + * @throws ExecutionException + */ + public void test_optionalJoin_withAltSink() throws InterruptedException, + ExecutionException { + final Var<?> x = Var.var("x"); + + final int startId = 1; + final int joinId = 2; + final int predId = 3; + + final PipelineJoin query = new PipelineJoin( + // left + new PipelineStartOp(new BOp[] {}, NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, startId),// + })), + // right + new Predicate<E>(new IVariableOrConstant[] { + new Constant<String>("Mary"), x }, NV + .asMap(new NV[] {// + new NV(Predicate.Annotations.RELATION_NAME, + new String[] { namespace }),// + new NV(Predicate.Annotations.PARTITION_ID, + Integer.valueOf(-1)),// + new NV(Predicate.Annotations.OPTIONAL, + Boolean.FALSE),// + new NV(Predicate.Annotations.CONSTRAINT, null),// + new NV(Predicate.Annotations.EXPANDER, null),// + new NV(Predicate.Annotations.BOP_ID, predId),// + })), + // join annotations + NV + .asMap(new NV[] { // + new NV(BOpBase.Annotations.BOP_ID, + joinId), + new NV(PipelineJoin.Annotations.OPTIONAL, + Boolean.TRUE),// +// + })// + ); + + /* + * Setup the source with two initial binding sets. One has nothing bound + * and will join with (Mary,x:=John) and (Mary,x:=Paul). The other has + * x:=Luke which does not join. 
However, this is an optional join so + * x:=Luke should output anyway. + */ + final IAsynchronousIterator<IBindingSet[]> source; + { + final IBindingSet bset1 = new HashBindingSet(); + final IBindingSet bset2 = new HashBindingSet(); + { + + bset2.set(x, new Constant<String>("Luke")); + + } + source = new ThickAsynchronousIterator<IBindingSet[]>( + new IBindingSet[][] { new IBindingSet[] { bset1, bset2 } }); + } + + // the expected solutions for the default sink. + final IBindingSet[] expected = new IBindingSet[] {// + new ArrayBindingSet(// + new IVariable[] { x },// + new IConstant[] { new Constant<String>("John") }// + ),// + new ArrayBindingSet(// + new IVariable[] { x },// + new IConstant[] { new Constant<String>("Paul") }// + ),// + }; + + // the expected solutions for the alternative sink. + final IBindingSet[] expected2 = new IBindingSet[] {// + new ArrayBindingSet(// + new IVariable[] { x },// + new IConstant[] { new Constant<String>("Luke") }// + ),// + }; + + final IBlockingBuffer<IBindingSet[]> sink = query.newBuffer(); + + final IBlockingBuffer<IBindingSet[]> sink2 = query.newBuffer(); + + final PipelineJoinStats stats = query.newStats(); + + final BOpContext<IBindingSet> context = new BOpContext<IBindingSet>( + null/* fed */, jnl/* indexManager */, + ITx.READ_COMMITTED/* readTimestamp */, + ITx.UNISOLATED/* writeTimestamp */, -1/* partitionId */, stats, + source, sink, sink2); + + // get task. + final FutureTask<Void> ft = query.eval(context); + + // execute task. + jnl.getExecutorService().execute(ft); + + TestQueryEngine.assertSolutions(expected, sink.iterator()); + TestQueryEngine.assertSolutions(expected2, sink2.iterator()); + + // join task + assertEquals(1L, stats.chunksIn.get()); + assertEquals(2L, stats.unitsIn.get()); + assertEquals(3L, stats.unitsOut.get()); + assertEquals(2L, stats.chunksOut.get()); + // access path + assertEquals(0L, stats.accessPathDups.get()); + assertEquals(2L, stats.accessPathCount.get()); + assertEquals(1L, stats.chunkCount.get()); + assertEquals(2L, stats.elementCount.get()); + + assertTrue(ft.isDone()); + assertFalse(ft.isCancelled()); + ft.get(); // verify nothing thrown. + + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconKeyOrder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconKeyOrder.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconKeyOrder.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -19,7 +19,7 @@ * @todo define a BigdataValuePredicate that interoperates with this class to * support joins against the lexicon. 
*/ -public class LexiconKeyOrder extends AbstractKeyOrder<BigdataValue> implements IKeyOrder<BigdataValue> { +public class LexiconKeyOrder extends AbstractKeyOrder<BigdataValue> { /* * Note: these constants make it possible to use switch(index()) Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicKeyOrder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicKeyOrder.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/magic/MagicKeyOrder.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -7,7 +7,7 @@ import com.bigdata.striterator.AbstractKeyOrder; import com.bigdata.striterator.IKeyOrder; -public class MagicKeyOrder extends AbstractKeyOrder<IMagicTuple> implements IKeyOrder<IMagicTuple>, Serializable { +public class MagicKeyOrder extends AbstractKeyOrder<IMagicTuple> implements Serializable { /** * Generated serialization version. Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOKeyOrder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOKeyOrder.java 2010-09-02 20:20:34 UTC (rev 3497) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOKeyOrder.java 2010-09-02 20:42:43 UTC (rev 3498) @@ -58,7 +58,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -public class SPOKeyOrder extends AbstractKeyOrder<ISPO> implements IKeyOrder<ISPO>, Serializable { +public class SPOKeyOrder extends AbstractKeyOrder<ISPO> implements Serializable { /** * This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-02 20:20:40
|
Revision: 3497 http://bigdata.svn.sourceforge.net/bigdata/?rev=3497&view=rev Author: blevine218 Date: 2010-09-02 20:20:34 +0000 (Thu, 02 Sep 2010) Log Message: ----------- Removed duplicate dependency on org.apache.river:tools. This was causing the latest m2eclipse plugin in Eclipse (Helios) to error out Modified Paths: -------------- branches/maven_scaleout/pom.xml Modified: branches/maven_scaleout/pom.xml =================================================================== --- branches/maven_scaleout/pom.xml 2010-09-02 19:46:45 UTC (rev 3496) +++ branches/maven_scaleout/pom.xml 2010-09-02 20:20:34 UTC (rev 3497) @@ -284,11 +284,6 @@ </dependency> <dependency> <groupId>org.apache.river</groupId> - <artifactId>tools</artifactId> - <version>2.1</version> - </dependency> - <dependency> - <groupId>org.apache.river</groupId> <artifactId>jsk-lib</artifactId> <version>2.1</version> </dependency> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <fko...@us...> - 2010-09-02 19:46:51
|
Revision: 3496 http://bigdata.svn.sourceforge.net/bigdata/?rev=3496&view=rev Author: fkoliver Date: 2010-09-02 19:46:45 +0000 (Thu, 02 Sep 2010) Log Message: ----------- Fix bad cut and paste in previous commit. Modified Paths: -------------- branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataenv Modified: branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataenv =================================================================== --- branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataenv 2010-09-02 18:52:36 UTC (rev 3495) +++ branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataenv 2010-09-02 19:46:45 UTC (rev 3496) @@ -129,13 +129,13 @@ # configuration file. # export JAVA_OPTS="-server -ea \ - -Xmx512m \ -showversion \ -Dcom.sun.jini.jeri.tcp.useNIO=@USE_NIO@ \ -Djava.security.policy=${BIGDATA_POLICY} \ -Dlog4j.configuration=${BIGDATA_LOG4J_CONFIG} \ -Djava.util.logging.config.file=${BIGDATA_LOGGING_CONFIG} \ -Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME} \ + -DappHome=@APP_HOME@ \ " # This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-02 18:52:46
|
Revision: 3495 http://bigdata.svn.sourceforge.net/bigdata/?rev=3495&view=rev Author: thompsonbry Date: 2010-09-02 18:52:36 +0000 (Thu, 02 Sep 2010) Log Message: ----------- Test suite for distinct binding sets operator and for an optional pipeline join using only the default sink. Reorganized the operator hierarchy somewhat. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/AbstractChunkedOrderedIteratorOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/AbstractPipelineOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ArrayBindingSet.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BindingSetPipelineOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/HashBindingSet.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineStartOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/DistinctBindingSetOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/DistinctElementFilter.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQ.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/NE.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/NEConstant.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/OR.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/eval/JoinGraph.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/ThreadLocalBufferFactory.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/IElementFilter.java branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestDeepCopy.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/aggregation/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/aggregation/TestDistinctBindingSets.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/ap/R.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/ap/TestPredicateAccessPath.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/constraint/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/MapBindingSetsOverShards.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/PipelineDelayOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/QueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/ReceiveBindingSets.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestPipelineUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/OwlSameAsPropertiesExpandingIterator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AbstractInlineConstraint.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/magic/IRISUtils.java 
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rules/MatchRule.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rules/RejectAnythingSameAsItself.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/Union.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/BOpConstraint.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/INBinarySearch.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/INConstraint.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/INHashMap.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/aggregation/TestUnionBindingSets.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/constraint/TestEQConstant.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/constraint/TestInBinarySearch.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/constraint/TestInHashMap.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/constraint/TestNE.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/constraint/TestNEConstant.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/constraint/TestOR.java Removed Paths: ------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Union.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/IN.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/AbstractChunkedOrderedIteratorOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/AbstractChunkedOrderedIteratorOp.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/AbstractChunkedOrderedIteratorOp.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -30,8 +30,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import com.bigdata.bop.ap.Predicate; - /** * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/AbstractPipelineOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/AbstractPipelineOp.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/AbstractPipelineOp.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -35,6 +35,9 @@ import com.bigdata.relation.accesspath.IBlockingBuffer; /** + * Abstract base class for pipelined operators regardless of the type of data + * moving along the pipeline. + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ArrayBindingSet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ArrayBindingSet.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ArrayBindingSet.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -50,19 +50,9 @@ private static final long serialVersionUID = -6468905602211956490L; - protected static final Logger log = Logger.getLogger(ArrayBindingSet.class); - - /** - * True iff the {@link #log} level is INFO or less. 
- */ - protected static final boolean INFO = log.isInfoEnabled(); + private static final Logger log = Logger.getLogger(ArrayBindingSet.class); /** - * True iff the {@link #log} level is DEBUG or less. - */ - protected static final boolean DEBUG = log.isDebugEnabled(); - - /** * A dense array of the bound variables. */ private final IVariable[] vars; @@ -77,7 +67,7 @@ /** * Copy constructor. */ - protected ArrayBindingSet(ArrayBindingSet bindingSet) { + protected ArrayBindingSet(final ArrayBindingSet bindingSet) { if (bindingSet == null) throw new IllegalArgumentException(); @@ -100,18 +90,17 @@ * @param vals * Their bound values. */ - public ArrayBindingSet(IVariable[] vars, IConstant[] vals) { + public ArrayBindingSet(final IVariable[] vars, final IConstant[] vals) { -// if (vars == null) -// throw new IllegalArgumentException(); -// -// if (vals == null) -// throw new IllegalArgumentException(); + if (vars == null) + throw new IllegalArgumentException(); - assert vars != null; - assert vals != null; - assert vars.length == vals.length; + if (vals == null) + throw new IllegalArgumentException(); + if(vars.length != vals.length) + throw new IllegalArgumentException(); + // for (int i = 0; i < vars.length; i++) { // // if (vars[i] == null) @@ -139,7 +128,7 @@ * @throws IllegalArgumentException * if the <i>capacity</i> is negative. */ - public ArrayBindingSet(int capacity) { + public ArrayBindingSet(final int capacity) { if (capacity < 0) throw new IllegalArgumentException(); @@ -244,7 +233,7 @@ * Since the array is dense (no gaps), {@link #clear(IVariable)} requires * that we copy down any remaining elements in the array by one position. */ - public void clear(IVariable var) { + public void clear(final IVariable var) { if (var == null) throw new IllegalArgumentException(); @@ -282,7 +271,7 @@ } - public IConstant get(IVariable var) { + public IConstant get(final IVariable var) { if (var == null) throw new IllegalArgumentException(); @@ -301,7 +290,7 @@ } - public boolean isBound(IVariable var) { + public boolean isBound(final IVariable var) { return get(var) != null; @@ -315,10 +304,11 @@ if (val == null) throw new IllegalArgumentException(); - if(DEBUG) { - - log.debug("var=" + var + ", val=" + val + ", nbound=" + nbound+", capacity="+vars.length); - + if (log.isTraceEnabled()) { + + log.trace("var=" + var + ", val=" + val + ", nbound=" + nbound + + ", capacity=" + vars.length); + } for (int i = 0; i < nbound; i++) { @@ -343,7 +333,7 @@ public String toString() { - StringBuilder sb = new StringBuilder(); + final StringBuilder sb = new StringBuilder(); sb.append("{"); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BindingSetPipelineOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BindingSetPipelineOp.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BindingSetPipelineOp.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -1,15 +1,51 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 2, 2010 + */ + package com.bigdata.bop; +import java.util.Map; + /** - * Interface for evaluating pipeline operations producing and consuming chunks - * of binding sets. + * Abstract base class for pipeline operators where the data moving along the + * pipeline is chunks of {@link IBindingSet}s. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ */ -public interface BindingSetPipelineOp extends PipelineOp<IBindingSet> { +abstract public class BindingSetPipelineOp extends + AbstractPipelineOp<IBindingSet> { - public interface Annotations extends PipelineOp.Annotations { + /** + * + */ + private static final long serialVersionUID = 1L; + public interface Annotations extends AbstractPipelineOp.Annotations { + /** * The value of the annotation is the {@link BOp.Annotations#BOP_ID} of * the ancestor in the operator tree which serves as an alternative sink @@ -20,4 +56,26 @@ } + /** + * Required deep copy constructor. + * + * @param op + */ + protected BindingSetPipelineOp(AbstractPipelineOp<IBindingSet> op) { + super(op); + } + + /** + * Shallow copy constructor. + * + * @param args + * @param annotations + */ + protected BindingSetPipelineOp(final BOp[] args, + final Map<String, Object> annotations) { + + super(args, annotations); + + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/HashBindingSet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/HashBindingSet.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/HashBindingSet.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -32,15 +32,20 @@ import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; +import java.util.LinkedList; import java.util.Map; import java.util.Map.Entry; - /** * {@link IBindingSet} backed by a {@link HashMap}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ + * + * @todo Since {@link Var}s allow reference testing, a faster implementation + * could be written based on a {@link LinkedList}. Just scan the list + * until the entry is found with the desired {@link Var} reference and + * then return it. 
*/ public class HashBindingSet implements IBindingSet { @@ -93,7 +98,28 @@ } - public boolean isBound(IVariable var) { + public HashBindingSet(final IVariable[] vars, final IConstant[] vals) { + + if (vars == null) + throw new IllegalArgumentException(); + + if (vals == null) + throw new IllegalArgumentException(); + + if (vars.length != vals.length) + throw new IllegalArgumentException(); + + map = new LinkedHashMap<IVariable, IConstant>(vars.length); + + for (int i = 0; i < vars.length; i++) { + + map.put(vars[i], vals[i]); + + } + + } + + public boolean isBound(final IVariable var) { if (var == null) throw new IllegalArgumentException(); @@ -102,7 +128,7 @@ } - public IConstant get(IVariable var) { + public IConstant get(final IVariable var) { if (var == null) throw new IllegalArgumentException(); @@ -111,7 +137,7 @@ } - public void set(IVariable var, IConstant val) { + public void set(final IVariable var, final IConstant val) { if (var == null) throw new IllegalArgumentException(); @@ -123,7 +149,7 @@ } - public void clear(IVariable var) { + public void clear(final IVariable var) { if (var == null) throw new IllegalArgumentException(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -30,7 +30,7 @@ import java.io.Serializable; -import com.bigdata.bop.ap.Union; +import com.bigdata.bop.join.PipelineJoin; import com.bigdata.mdi.PartitionLocator; import com.bigdata.relation.IMutableRelation; import com.bigdata.relation.IRelation; @@ -85,6 +85,8 @@ /** * <code>true</code> iff the predicate is optional (the right operand of * a left join). + * + * @deprecated This flag is being moved to the join operator. */ String OPTIONAL = "optional"; @@ -131,8 +133,8 @@ * @param index * The index into the array of relation names in the view. * - * @deprecated Unions of predicates must be handled explicitly. See - * {@link Union}. + * @deprecated Unions of predicates must be handled explicitly as a union of + * pipeline operators reading against the different predicate. 
*/ public String getRelationName(int index); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineStartOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineStartOp.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineStartOp.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -29,7 +29,6 @@ import java.util.Map; import java.util.concurrent.Callable; -import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import com.bigdata.bop.engine.BOpStats; @@ -44,8 +43,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -public class PipelineStartOp extends AbstractPipelineOp<IBindingSet> implements - BindingSetPipelineOp { +public class PipelineStartOp extends BindingSetPipelineOp { /** * @@ -82,8 +80,6 @@ */ static private class CopyTask implements Callable<Void> { -// private final BOpContext<IBindingSet> context; - private final BOpStats stats; private final IAsynchronousIterator<IBindingSet[]> source; @@ -91,8 +87,6 @@ private final IBlockingBuffer<IBindingSet[]> sink; CopyTask(final BOpContext<IBindingSet> context) { - -// this.context = context; stats = context.getStats(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/DistinctBindingSetOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/DistinctBindingSetOp.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/DistinctBindingSetOp.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -7,10 +7,10 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.FutureTask; -import com.bigdata.bop.AbstractPipelineOp; -import com.bigdata.bop.ArrayBindingSet; import com.bigdata.bop.BOp; import com.bigdata.bop.BOpContext; +import com.bigdata.bop.BindingSetPipelineOp; +import com.bigdata.bop.HashBindingSet; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstant; import com.bigdata.bop.IVariable; @@ -25,14 +25,14 @@ * @version $Id: DistinctElementFilter.java 3466 2010-08-27 14:28:04Z * thompsonbry $ */ -public class DistinctBindingSetOp extends AbstractPipelineOp<IBindingSet>{ +public class DistinctBindingSetOp extends BindingSetPipelineOp { /** * */ private static final long serialVersionUID = 1L; - public interface Annotations extends BOp.Annotations { + public interface Annotations extends BindingSetPipelineOp.Annotations { /** * The initial capacity of the {@link ConcurrentHashMap} used to impose @@ -40,7 +40,7 @@ * * @see #DEFAULT_INITIAL_CAPACITY */ - String INITIAL_CAPACITY = "initialCapacity"; + String INITIAL_CAPACITY = DistinctBindingSetOp.class.getName()+".initialCapacity"; int DEFAULT_INITIAL_CAPACITY = 16; @@ -50,7 +50,7 @@ * * @see #DEFAULT_LOAD_FACTOR */ - String LOAD_FACTOR = "loadFactor"; + String LOAD_FACTOR = DistinctBindingSetOp.class.getName()+".loadFactor"; float DEFAULT_LOAD_FACTOR = .75f; @@ -60,7 +60,7 @@ * * @see #DEFAULT_CONCURRENCY_LEVEL */ - String CONCURRENCY_LEVEL = "concurrencyLevel"; + String CONCURRENCY_LEVEL = DistinctBindingSetOp.class.getName()+".concurrencyLevel"; int DEFAULT_CONCURRENCY_LEVEL = 16; @@ -170,7 +170,7 @@ if (vals.length != t.vals.length) return false; for (int i = 0; i < vals.length; i++) { - // @todo allow for nulls. 
+ // @todo verify that this allows for nulls with a unit test. if (vals[i] == t.vals[i]) continue; if (vals[i] == null) @@ -185,7 +185,7 @@ /** * Task executing on the node. */ - private class DistinctTask implements Callable<Void> { + static private class DistinctTask implements Callable<Void> { private final BOpContext<IBindingSet> context; @@ -209,8 +209,8 @@ this.vars = op.getVariables(); this.map = new ConcurrentHashMap<Solution, Solution>( - getInitialCapacity(), getLoadFactor(), - getConcurrencyLevel()); + op.getInitialCapacity(), op.getLoadFactor(), + op.getConcurrencyLevel()); } @@ -230,16 +230,13 @@ for (int i = 0; i < vars.length; i++) { - if ((r[i] = bset.get(vars[i])) == null) { + /* + * Note: This allows null's. + * + * @todo write a unit test when some variables are not bound. + */ + r[i] = bset.get(vars[i]); - /* - * @todo probably allow nulls, but write a unit test for it. - */ - - throw new RuntimeException("Not bound: " + vars[i]); - - } - } final Solution s = new Solution(r); @@ -283,14 +280,7 @@ // System.err.println("accepted: " // + Arrays.toString(vals)); - /* - * @todo This may cause problems since the - * ArrayBindingSet does not allow mutation with - * variables not declared up front. In that case use - * new HashBindingSet( new ArrayBindingSet(...)). - */ - - accepted.add(new ArrayBindingSet(vars, vals)); + accepted.add(new HashBindingSet(vars, vals)); naccepted++; Copied: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/Union.java (from rev 3466, branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Union.java) =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/Union.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/Union.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -0,0 +1,135 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Aug 18, 2010 + */ + +package com.bigdata.bop.aggregation; + +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.FutureTask; + +import com.bigdata.bop.BOpContext; +import com.bigdata.bop.BindingSetPipelineOp; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.engine.Haltable; +import com.bigdata.bop.join.PipelineJoin; +import com.bigdata.rdf.rules.TMUtility; +import com.bigdata.relation.RelationFusedView; + +/** + * The union of two or more {@link BindingSetPipelineOp} operators. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + * + * @todo I have some basic questions about the ability to use a UNION of two + * predicates in scale-out. I think that this might be more accurately + * modeled as the UNION of two joins. 
That is, rather than: + * + * <pre> + * JOIN( ..., + * UNION( foo.spo(A,loves,B), + * bar.spo(A,loves,B) ) + * ) + * </pre> + * + * using + * + * <pre> + * UNION( JOIN( ..., foo.spo(A,loves,B) ), + * JOIN( ..., bar.spo(A,loves,B) ) + * ) + * </pre> + * + * which would be a binding set union rather than an element union. + * + * @todo The union of access paths was historically handled by + * {@link RelationFusedView}. That class should be removed once queries + * are rewritten to use the union of joins. + * + * @todo The {@link TMUtility} will have to be updated to use this operator + * rather than specifying multiple source "names" for the relation of the + * predicate. + * + * @todo The FastClosureRuleTask will also need to be updated to use a + * {@link Union} over the joins rather than a {@link RelationFusedView}. + */ +public class Union extends BindingSetPipelineOp { + + /** + * + */ + private static final long serialVersionUID = 1L; + + /** + * @param args + * Two or more operators whose union is desired. + * @param annotations + */ + public Union(final BindingSetPipelineOp[] args, + final Map<String, Object> annotations) { + + super(args, annotations); + + if (args.length < 2) + throw new IllegalArgumentException(); + + } + + public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { + + return new FutureTask<Void>(new UnionTask(this, context)); + + } + + /** + * Pipeline union impl. + * + * FIXME All this does is copy its inputs to its outputs. Since we only run + * one chunk of input at a time, it seems that the easiest way to implement + * a union is to have the operators in the union just target the same sink. + */ + private static class UnionTask extends Haltable<Void> implements Callable<Void> { + + public UnionTask(// + final Union op,// + final BOpContext<IBindingSet> context + ) { + + if (op == null) + throw new IllegalArgumentException(); + if (context == null) + throw new IllegalArgumentException(); + } + + public Void call() throws Exception { + // TODO Auto-generated method stub + throw new UnsupportedOperationException(); + } + + } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/DistinctElementFilter.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/DistinctElementFilter.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/DistinctElementFilter.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -1,58 +1,46 @@ package com.bigdata.bop.ap; -import java.util.UUID; -import java.util.concurrent.Callable; +import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.FutureTask; +import com.bigdata.bop.BOp; import com.bigdata.bop.BOpBase; -import com.bigdata.bop.BOp; -import com.bigdata.bop.BOpList; +import com.bigdata.bop.BOpContext; import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.IVariable; -import com.bigdata.bop.NV; -import com.bigdata.bop.aggregation.DistinctBindingSetOp; -import com.bigdata.btree.keys.KeyBuilder; -import com.bigdata.rdf.relation.rule.BindingSetSortKeyBuilder; import com.bigdata.rdf.spo.DistinctSPOIterator; -import com.bigdata.relation.accesspath.IBlockingBuffer; -import com.bigdata.relation.rule.eval.IJoinNexus; -import com.bigdata.relation.rule.eval.ISolution; +import com.bigdata.rdf.spo.SPO; +import com.bigdata.relation.accesspath.IElementFilter; import com.bigdata.striterator.DistinctFilter; import 
com.bigdata.striterator.IChunkConverter; import com.bigdata.striterator.MergeFilter; /** - * A DISTINCT operator based on a hash table. + * A DISTINCT operator based for elements in a relation. The operator is based + * on a hash table. New elements are constructed for each original element in + * which only the distinct fields are preserved. If the new element is distinct + * then it is passed by the filter. + * <p> + * The filter is capable of changing the type of the accepted elements. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id: DistinctElementFilter.java 3466 2010-08-27 14:28:04Z * thompsonbry $ + * * @param <E> + * The generic type of the source elements for the filter. + * @param <F> + * The generic type of the elements passed by the filter. * - * @todo could have an implementation backed by a persistent hash map using an - * extensible hash function to automatically grow the persistence store. - * This could be a general purpose persistent hash functionality, but it - * could also operate against a temporary file when used in the context of - * a query (the backing file can be destroyed afterwards or the data can - * be simply written onto the current temporary store). + * @todo support changing the generic type as part of the filter. this is + * similar to the {@link IChunkConverter}. * - * @todo Consider the use of lock amortization (batching) to reduce contention - * for the backing map. Alternatively, we could accept entire blocks of - * elements from a single source at a time, which would single thread us - * through the map. Or bound the #of threads hitting the map at once, - * increase the map concurrency level, etc. - * * @todo Reconcile with {@link IChunkConverter}, {@link DistinctFilter} (handles * solutions) and {@link MergeFilter} (handles comparables), - * {@link DistinctSPOIterator}, {@link DistinctBindingSetOp}, etc. + * {@link DistinctSPOIterator}, etc. */ -public class DistinctElementFilter<E> -extends BOpBase -//extends AbstractChunkedIteratorOp<E> -//implements IElementFilter<E>, -// implements IConstraint, -// implements ChunkedIteratorOp<E> -{ +public class DistinctElementFilter<E> extends BOpBase implements + IElementFilter<E> { /** * @@ -61,104 +49,272 @@ public interface Annotations extends BOp.Annotations { - String INITIAL_CAPACITY = "initialCapacity"; + /** + * The initial capacity of the {@link ConcurrentHashMap} used to impose + * the distinct constraint. + * + * @see #DEFAULT_INITIAL_CAPACITY + */ + String INITIAL_CAPACITY = DistinctElementFilter.class.getName() + + ".initialCapacity"; - String LOAD_FACTOR = "loadFactor"; + int DEFAULT_INITIAL_CAPACITY = 16; - String CONCURRENCY_LEVEL = "concurrencyLevel"; + /** + * The load factor of the {@link ConcurrentHashMap} used to impose the + * distinct constraint. + * + * @see #DEFAULT_LOAD_FACTOR + */ + String LOAD_FACTOR = DistinctElementFilter.class.getName() + + ".loadFactor"; - } + float DEFAULT_LOAD_FACTOR = .75f; - public DistinctElementFilter(final IVariable<?>[] distinctList, - final UUID masterUUID) { + /** + * The concurrency level of the {@link ConcurrentHashMap} used to impose + * the distinct constraint. 
+ * + * @see #DEFAULT_CONCURRENCY_LEVEL + */ + String CONCURRENCY_LEVEL = DistinctElementFilter.class.getName() + + ".concurrencyLevel"; - super(distinctList, NV.asMap(new NV[] { - // new NV(Annotations.QUERY_ID, masterUUID), - // new NV(Annotations.BOP_ID, bopId) - })); + int DEFAULT_CONCURRENCY_LEVEL = 16; - if (masterUUID == null) - throw new IllegalArgumentException(); + /** + * The set of fields whose values must be distinct. + * + * @todo abstract base class to allow easy override for specific element + * types such as {@link SPO}. + */ + String FIELDS = DistinctElementFilter.class.getName() + ".fields"; + /** + * An optional constraint on the runtime type of the elements which are + * acceptable to this filter. + * + * @see IElementFilter#canAccept(Object) + * + * @todo I am not convinced that we need this. It parallels something + * which was introduced into the {@link IElementFilter} interface, + * but I suspect that we do not need that either. + */ + String CLASS_CONSTRAINT = DistinctElementFilter.class.getName() + + ".classConstraint"; + } -// public Future<Void> eval(final IBigdataFederation<?> fed, -// final IJoinNexus joinNexus, final IBlockingBuffer<E[]> buffer) { -// -// final FutureTask<Void> ft = new FutureTask<Void>(new DHTTask(joinNexus, -// buffer)); -// -// joinNexus.getIndexManager().getExecutorService().execute(ft); -// -// return ft; -// -// } + /** + * Required deep copy constructor. + */ + public DistinctElementFilter(final DistinctElementFilter<E> op) { + super(op); + } /** - * Task executing on the node. + * Required shallow copy constructor. */ - private class DHTTask implements Callable<Void> { + public DistinctElementFilter(final BOp[] args, + final Map<String, Object> annotations) { - private final IJoinNexus joinNexus; + super(args, annotations); - private final IBlockingBuffer<E[]> buffer; + final int[] fields = getFields(); - private final ConcurrentHashMap<byte[], Void> map; + if (fields == null) + throw new IllegalArgumentException(); - /* Note: This is NOT thread safe! */ - private final BindingSetSortKeyBuilder sortKeyBuilder; - - DHTTask(final IJoinNexus joinNexus, - final IBlockingBuffer<E[]> buffer) { + if (fields.length == 0) + throw new IllegalArgumentException(); - this.joinNexus = joinNexus; - - this.buffer = buffer; + } - final IVariable<?>[] vars = ((BOpList) get(0/* distinctList */)) - .toArray(new IVariable[0]); + /** + * @see Annotations#INITIAL_CAPACITY + */ + public int getInitialCapacity() { - this.sortKeyBuilder = new BindingSetSortKeyBuilder(KeyBuilder - .newInstance(), vars); + return getProperty(Annotations.INITIAL_CAPACITY, + Annotations.DEFAULT_INITIAL_CAPACITY); - this.map = new ConcurrentHashMap<byte[], Void>(/* - * @todo initialCapacity using annotations - * @todo loadFactor ... - * @todo concurrencyLevel ... - */); - } + } - private boolean accept(final IBindingSet bset) { + /** + * @see Annotations#LOAD_FACTOR + */ + public float getLoadFactor() { - return map.putIfAbsent(sortKeyBuilder.getSortKey(bset), null) == null; + return getProperty(Annotations.LOAD_FACTOR, + Annotations.DEFAULT_LOAD_FACTOR); - } + } - public Void call() throws Exception { + /** + * @see Annotations#CONCURRENCY_LEVEL + */ + public int getConcurrencyLevel() { - /* - * FIXME Setup to drain binding sets from the source. Note that the - * sort key builder is not thread safe, so a pool of key builders - * with a non-default initial capacity (LT 1024) might be used to - * allow higher concurrency for key building. 
- * - * Alternatively, the caller could generate the keys (SOUNDS GOOD) - * and just ship the byte[] keys to the DHTFilter. - * - * The DHTFilter needs to send back its boolean[] responses bit - * coded or run length coded. See AbstractArrayIndexProcedure which - * already does some of that. Those responses should move through - * NIO Buffers just like everything else, but the response will be - * much smaller than the incoming byte[][] (aka IRaba). - */ - throw new UnsupportedOperationException(); + return getProperty(Annotations.CONCURRENCY_LEVEL, + Annotations.DEFAULT_CONCURRENCY_LEVEL); - } + } + public int[] getFields() { + + return (int[]) getProperty(Annotations.FIELDS); + } + + public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { - // public ResultBitBuffer bulkFilter(final K[] elements) { - // - // } +// return new FutureTask<Void>(new DistinctTask<E>(this, context)); + throw new UnsupportedOperationException(); + + } + public boolean accept(E e) { + // TODO Auto-generated method stub + return false; + } + + public boolean canAccept(Object o) { + // @todo by annotation giving an optional type constraint. + return true; + } + +// /** +// * Task executing on the node. +// */ +// static private class DistinctTask<E> implements Callable<Void> { +// +// private final BOpContext<IBindingSet> context; +// +// /** +// * A concurrent map whose keys are the bindings on the specified +// * variables (the keys and the values are the same since the map +// * implementation does not allow <code>null</code> values). +// */ +// private /*final*/ ConcurrentHashMap<E, E> map; +// +// /** +// * The variables used to impose a distinct constraint. +// */ +// private final int[] fields; +// +// DistinctTask(final DistinctElementFilter<E> op, +// final BOpContext<IBindingSet> context) { +// +// this.context = context; +// +// this.fields = op.getFields(); +// +// this.map = new ConcurrentHashMap<E, E>( +// op.getInitialCapacity(), op.getLoadFactor(), +// op.getConcurrencyLevel()); +// +// } +// +// /** +// * Construct an element are distinct for the configured variables then return +// * those bindings. +// * +// * @param bset +// * The binding set to be filtered. +// * +// * @return The distinct as bound values -or- <code>null</code> if the +// * binding set duplicates a solution which was already accepted. +// */ +// private E accept(final E e) { +// +// final E e2 = newElement(e); +// +// final boolean distinct = map.putIfAbsent(e2, e2) == null; +// +// return distinct ? e2 : null; +// +// } +// +// public Void call() throws Exception { +// +// final BOpStats stats = context.getStats(); +// +// final IAsynchronousIterator<IBindingSet[]> itr = context +// .getSource(); +// +// final IBlockingBuffer<IBindingSet[]> sink = context.getSink(); +// +// try { +// +// while (itr.hasNext()) { +// +// final IBindingSet[] a = itr.next(); +// +// stats.chunksIn.increment(); +// stats.unitsIn.add(a.length); +// +// final List<IBindingSet> accepted = new LinkedList<IBindingSet>(); +// +// int naccepted = 0; +// +// for (IBindingSet bset : a) { +// +//// System.err.println("considering: " + bset); +// +// final IConstant<?>[] vals = accept(bset); +// +// if (vals != null) { +// +//// System.err.println("accepted: " +//// + Arrays.toString(vals)); +// +// /* +// * @todo This may cause problems since the +// * ArrayBindingSet does not allow mutation with +// * variables not declared up front. In that case use +// * new HashBindingSet( new ArrayBindingSet(...)). 
+// */ +// +// accepted.add(new ArrayBindingSet(vars, vals)); +// +// naccepted++; +// +// } +// +// } +// +// if (naccepted > 0) { +// +// final IBindingSet[] b = accepted +// .toArray(new IBindingSet[naccepted]); +// +//// System.err.println("output: " +//// + Arrays.toString(b)); +// +// sink.add(b); +// +// stats.unitsOut.add(naccepted); +// stats.chunksOut.increment(); +// +// } +// +// } +// +// // done. +// return null; +// +// } finally { +// +// sink.flush(); +// sink.close(); +// +// // discard the map. +// map = null; +// +// } +// +// } +// +// } + } Deleted: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Union.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Union.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Union.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -1,139 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Aug 18, 2010 - */ - -package com.bigdata.bop.ap; - -import java.util.Map; - -import com.bigdata.bop.AbstractChunkedOrderedIteratorOp; -import com.bigdata.bop.BOp; -import com.bigdata.bop.ChunkedOrderedIteratorOp; -import com.bigdata.bop.IPredicate; -import com.bigdata.bop.engine.MapBindingSetsOverShards; -import com.bigdata.rdf.rules.TMUtility; -import com.bigdata.relation.RelationFusedView; -import com.bigdata.relation.rule.eval.IJoinNexus; -import com.bigdata.service.IBigdataFederation; -import com.bigdata.service.proxy.IRemoteChunkedIterator; -import com.bigdata.striterator.ChunkedOrderedStriterator; -import com.bigdata.striterator.IChunkedOrderedIterator; -import com.ibm.icu.impl.ByteBuffer; - -/** - * An operator which returns the union of two {@link IPredicate}s. Elements are - * consumed first from the left predicate and then from the right predicate. - * This operator does not cross network boundaries. An intermediate send / - * receive operator pattern must be applied when this operator is used in a - * scale-out context. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * - * @todo I have some basic questions about the ability to use a UNION of two - * predicates in scale-out. I think that this might be more accurately - * modeled as the UNION of two joins. That is, rather than: - * - * <pre> - * JOIN( ..., - * UNION( foo.spo(A,loves,B), - * bar.spo(A,loves,B) ) - * ) - * </pre> - * using - * <pre> - * UNION( JOIN( ..., foo.spo(A,loves,B) ), - * JOIN( ..., bar.spo(A,loves,B) ) - * ) - * </pre> - * which would be a binding set union rather than an element union. - * - * @todo This was historically handled by {@link RelationFusedView} which should - * be removed when this class is implemented. 
- * - * @todo The {@link TMUtility} will have to be updated to use this operator - * rather than specifying multiple source "names" for the relation of the - * predicate. - * - * @todo The FastClosureRuleTask will also need to be updated to use a - * {@link Union} rather than a {@link RelationFusedView}. - * - * @todo It would be a trivial generalization to make this an N-ary union. - * - * @todo A similar operator could be defined where child operands to execute - * concurrently and the result is no longer strongly ordered. - * - * @todo Implement the send/receive pattern. - * <p> - * This COULD be done using {@link IRemoteChunkedIterator} if the send and - * receive operators are appropriately decorated in order to pass the - * proxy object along. - * <p> - * This SHOULD be implemented using an NIO direct {@link ByteBuffer} - * pattern similar to {@link MapBindingSetsOverShards}. - */ -public class Union<E> extends AbstractChunkedOrderedIteratorOp<E> { - - /** - * - */ - private static final long serialVersionUID = 1L; - - /** - * @param left - * @param rigtht - * @param annotations - */ - public Union(final ChunkedOrderedIteratorOp<E> left, - final ChunkedOrderedIteratorOp<E> right, - final Map<String, Object> annotations) { - - super(new BOp[] { left, right }, annotations); - - } - - @SuppressWarnings("unchecked") - protected ChunkedOrderedIteratorOp<E> left() { - return (ChunkedOrderedIteratorOp<E>)get(0); - } - - @SuppressWarnings("unchecked") - protected ChunkedOrderedIteratorOp<E> right() { - return (ChunkedOrderedIteratorOp<E>)get(1); - } - - @SuppressWarnings("unchecked") - public IChunkedOrderedIterator<E> eval(final IBigdataFederation<?> fed, - final IJoinNexus joinNexus) { - - return (IChunkedOrderedIterator<E>) new ChunkedOrderedStriterator<IChunkedOrderedIterator<E>, E>(// - left().eval(fed, joinNexus)).append(// - right().eval(fed, joinNexus)// - ); - - } - -} Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/BOpConstraint.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/BOpConstraint.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/BOpConstraint.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -0,0 +1,66 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 2, 2010 + */ + +package com.bigdata.bop.constraint; + +import java.util.Map; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpBase; +import com.bigdata.bop.IConstraint; + +/** + * Abstract base class for constraint operators. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +abstract public class BOpConstraint extends BOpBase implements IConstraint { + + /** + * + */ + private static final long serialVersionUID = 1L; + + /** + * Required deep copy constructor. + * @param op + */ + public BOpConstraint(BOpBase op) { + super(op); + } + + /** + * Required shallow copy constructor. + * @param args + * @param annotations + */ + public BOpConstraint(BOp[] args, Map<String, Object> annotations) { + super(args, annotations); + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/BOpConstraint.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQ.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQ.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQ.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -26,11 +26,9 @@ import java.util.Map; -import com.bigdata.bop.BOpBase; import com.bigdata.bop.BOp; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstant; -import com.bigdata.bop.IConstraint; import com.bigdata.bop.IVariable; /** @@ -39,7 +37,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -public class EQ extends BOpBase implements IConstraint { +public class EQ extends BOpConstraint { private static final long serialVersionUID = 1L; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -26,11 +26,9 @@ import java.util.Map; -import com.bigdata.bop.BOpBase; import com.bigdata.bop.BOp; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstant; -import com.bigdata.bop.IConstraint; import com.bigdata.bop.IVariable; /** @@ -39,7 +37,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -public class EQConstant extends BOpBase implements IConstraint { +public class EQConstant extends BOpConstraint { /** * @@ -62,7 +60,7 @@ public EQConstant(final IVariable<?> var, final IConstant<?> val) { - super(new BOp[] { var, val }); + super(new BOp[] { var, val }, null/*annotations*/); if (var == null) throw new IllegalArgumentException(); Deleted: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/IN.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/IN.java 2010-09-02 13:49:55 UTC (rev 3494) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/IN.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -1,197 +0,0 @@ -/** - -The Notice below must appear in each file of the Source Code of any -copy you distribute of the Licensed Product. Contributors to any -Modifications may add their own copyright notices to identify their -own contributions. - -License: - -The contents of this file are subject to the CognitiveWeb Open Source -License Version 1.1 (the License). 
You may not copy or use this file, -in either source code or executable form, except in compliance with -the License. You may obtain a copy of the License from - - http://www.CognitiveWeb.org/legal/license/ - -Software distributed under the License is distributed on an AS IS -basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See -the License for the specific language governing rights and limitations -under the License. - -Copyrights: - -Portions created by or assigned to CognitiveWeb are Copyright -(c) 2003-2003 CognitiveWeb. All Rights Reserved. Contact -information for CognitiveWeb is available at - - http://www.CognitiveWeb.org - -Portions Copyright (c) 2002-2003 Bryan Thompson. - -Acknowledgements: - -Special thanks to the developers of the Jabber Open Source License 1.0 -(JOSL), from which this License was derived. This License contains -terms that differ from JOSL. - -Special thanks to the CognitiveWeb Open Source Contributors for their -suggestions and support of the Cognitive Web. - -Modifications: - -*/ -/* - * Created on Jun 17, 2008 - */ - -package com.bigdata.bop.constraint; - -import java.util.Arrays; -import java.util.HashSet; -import java.util.Map; - -import com.bigdata.bop.BOpBase; -import com.bigdata.bop.BOp; -import com.bigdata.bop.BOpList; -import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.IConstant; -import com.bigdata.bop.IConstraint; -import com.bigdata.bop.IVariable; -import com.bigdata.rdf.spo.InGraphBinarySearchFilter; -import com.bigdata.rdf.spo.InGraphHashSetFilter; - -/** - * A constraint that a variable may only take on the bindings enumerated by some - * set. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * - * @todo This uses binary search, which is thread-safe. It could also use a - * {@link HashSet}, but the {@link HashSet} needs to be thread-safe since - * the filter could be applied concurrently during evaluation. - * - * FIXME Reconcile this with {@link InGraphBinarySearchFilter} and - * {@link InGraphHashSetFilter} and also with the use of an in-memory join - * against the incoming binding sets to handle SPARQL data sets. - */ -public class IN<T> extends BOpBase implements IConstraint { - -// /** -// * -// */ -// private static final long serialVersionUID = 5805883429399100605L; -// -// private final IVariable<T> x; -// -// private final T[] set; - - /** - * - */ - private static final long serialVersionUID = 1L; - - /** - * The sorted data (cached). - * <p> - * Note: This cache is redundant with the 2nd argument to the operator. It - * is not serialized and is compiled on demand when the operator is used. - */ - private transient volatile T[] set; - - /** - * Deep copy constructor. - */ - public IN(final IN<T> op) { - super(op); - } - - /** - * Shallow copy constructor. - */ - public IN(final BOp[] args, final Map<String, Object> annotations) { - - // @todo validate args? - super(args, annotations); - - } - - /** - * - * @param x - * Some variable. - * @param set - * A set of legal term identifiers providing a constraint on the - * allowable values for that variable. 
- */ - public IN(final IVariable<T> x, final IConstant<T>[] set) { - - super(new BOp[] { x, new BOpList(set) }); - - if (x == null || set == null) - throw new IllegalArgumentException(); - - if (set.length == 0) - throw new IllegalArgumentException(); - - } - - @SuppressWarnings("unchecked") - static private <T> T[] sort(final BOpList set) { - - final int n = set.arity(); - - if (n == 0) - throw new IllegalArgumentException(); - - final T firstValue = ((IConstant<T>) set.get(0)).get(); - - // allocate an array of the correct type. - final T[] tmp = (T[]) java.lang.reflect.Array.newInstance(firstValue - .getClass(), n); - - for (int i = 0; i < n; i++) { - - // dereference the constants to their bound values. - tmp[i] = ((IConstant<T>) set.get(i)).get(); - - } - - // sort the bound values. - Arrays.sort(tmp); - - return tmp; - - } - - public boolean accept(final IBindingSet bindingSet) { - - if(set == null) { - - set = sort((BOpList) get(1)); - - } - - // get binding for "x". - @SuppressWarnings("unchecked") - final IConstant<T> x = bindingSet.get((IVariable<?>) get(0)/* x */); - - if (x == null) { - - // not yet bound. - return true; - - } - - final T v = x.get(); - - // lookup the bound value in the set of values. - final int pos = Arrays.binarySearch(set, v); - - // true iff the bound value was found in the set. - return pos >= 0; - - } - -} Copied: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/INBinarySearch.java (from rev 3466, branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/IN.java) =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/INBinarySearch.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/INBinarySearch.java 2010-09-02 18:52:36 UTC (rev 3495) @@ -0,0 +1,201 @@ +/** + +The Notice below must appear in each file of the Source Code of any +copy you distribute of the Licensed Product. Contributors to any +Modifications may add their own copyright notices to identify their +own contributions. + +License: + +The contents of this file are subject to the CognitiveWeb Open Source +License Version 1.1 (the License). You may not copy or use this file, +in either source code or executable form, except in compliance with +the License. You may obtain a copy of the License from + + http://www.CognitiveWeb.org/legal/license/ + +Software distributed under the License is distributed on an AS IS +basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +the License for the specific language governing rights and limitations +under the License. + +Copyrights: + +Portion... [truncated message content] |
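The IN to INBinarySearch rename above preserves one small but reusable technique: the constant set is sorted lazily into a private array, and membership is tested with Arrays.binarySearch, which stays thread-safe because the sorted array is never mutated once published. A minimal standalone sketch follows; the Bindings interface and all other names here are invented for illustration and are not the bigdata API.

import java.util.Arrays;

/* Hypothetical stand-in for bigdata's IBindingSet: variable name -> bound value. */
interface Bindings {
    Object get(String var); // null when the variable is not yet bound
}

/*
 * Sketch of an IN constraint using binary search over a lazily sorted,
 * effectively immutable array (cf. INBinarySearch above).
 */
class InBinarySearchSketch<T extends Comparable<T>> {

    private final String var;
    private final T[] given;     // constants as supplied, possibly unsorted
    private volatile T[] sorted; // sorted copy, compiled on first use

    InBinarySearchSketch(final String var, final T[] given) {
        if (var == null || given == null || given.length == 0)
            throw new IllegalArgumentException();
        this.var = var;
        this.given = given;
    }

    boolean accept(final Bindings bset) {
        T[] s = sorted;
        if (s == null) {
            // Benign race: concurrent callers may each sort a copy, but all
            // copies are equivalent, so the last write wins harmlessly.
            s = Arrays.copyOf(given, given.length);
            Arrays.sort(s);
            sorted = s;
        }
        @SuppressWarnings("unchecked")
        final T v = (T) bset.get(var);
        if (v == null)
            return true; // unbound: the constraint cannot reject yet
        return Arrays.binarySearch(s, v) >= 0;
    }
}

As the original javadoc notes, a HashSet would give O(1) lookups but must then be thread-safe (or safely published and never mutated); the immutable sorted array sidesteps that concern at O(log n) per probe.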
From: <fko...@us...> - 2010-09-02 13:50:01
Revision: 3494 http://bigdata.svn.sourceforge.net/bigdata/?rev=3494&view=rev Author: fkoliver Date: 2010-09-02 13:49:55 +0000 (Thu, 02 Sep 2010) Log Message: ----------- Remove references to logging through SimpleSocketLogger, leaving log files local to each system. Modified Paths: -------------- branches/maven_scaleout/src/main/deploy/legacy/config/README branches/maven_scaleout/src/main/deploy/legacy/config/standalone/log4j.properties branches/maven_scaleout/src/main/deploy/legacy/install.properties branches/maven_scaleout/src/main/deploy/legacy/install.xml branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataenv branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataup Removed Paths: ------------- branches/maven_scaleout/src/main/deploy/legacy/scripts/runLog4jServer.sh Modified: branches/maven_scaleout/src/main/deploy/legacy/config/README =================================================================== --- branches/maven_scaleout/src/main/deploy/legacy/config/README 2010-09-01 22:48:57 UTC (rev 3493) +++ branches/maven_scaleout/src/main/deploy/legacy/config/README 2010-09-02 13:49:55 UTC (rev 3494) @@ -14,10 +14,6 @@ to control the log levels for jini/river, which uses java logging rather than log4j. -log4jServer.properties - A default log4j configuration file if you decide to - use the log4j SimpleSocketServer to aggregate log4j - messages from the federation. - policy.all - A default java permissions file. This file grants ALL permissions. You may specify a more rigorous security policy. Modified: branches/maven_scaleout/src/main/deploy/legacy/config/standalone/log4j.properties =================================================================== --- branches/maven_scaleout/src/main/deploy/legacy/config/standalone/log4j.properties 2010-09-01 22:48:57 UTC (rev 3493) +++ branches/maven_scaleout/src/main/deploy/legacy/config/standalone/log4j.properties 2010-09-02 13:49:55 UTC (rev 3494) @@ -1,24 +1,7 @@ # log4j client configuration. # -# Note: This is the configuration that the application should use. It -# sends log events to a log4j server using the [socketLogger]. This -# lets you aggregate log events across a cluster. The log4j server -# handles things like rolling over the log files. +# Note: This is the configuration that the application should use. # -# Note: If the network link to the log4j server goes down, then the -# application will block. This makes that link a single point of -# failure. -# -# Note: If the log4j server goes down, then a warning will be logged -# on stderr for the application. -# -# Note: The application will proceed no faster than the TCP packets -# containing the log events that are being sent to the server. This -# should not be a problem unless very detailed logging is turned on. -# -# Note: The layout is NOT used by the SocketAppender. The layout is -# decided on the other end by the log4j server's configuration file. -# # You can also enable local logging. You will have to choose a well # known location for the log file since the same log4j configuration # is used by all of the services (at least, by all of the same service @@ -29,15 +12,6 @@ # Configure appenders to be used. ## -# Setup relays log messages to a remote socket logger. The remote socket -# logger is then configured in [log4jServer.properties]. It will log ALL -# on the detailLog, ERROR+ on the errorLog and events on the eventLog. -# However, it will ONLY log those messages which are logged here onto the -# [socketLogger]. 
-# -# Note: ERROR+ is also logged onto a local console so you can see any problems -# during startup when the socket logger might not be running. -# # Note: You can control many of the logger levels (on a service-by-service # bases) using remote JMX MBeans. # Modified: branches/maven_scaleout/src/main/deploy/legacy/install.properties =================================================================== --- branches/maven_scaleout/src/main/deploy/legacy/install.properties 2010-09-01 22:48:57 UTC (rev 3493) +++ branches/maven_scaleout/src/main/deploy/legacy/install.properties 2010-09-02 13:49:55 UTC (rev 3494) @@ -1,6 +1,5 @@ # Bigdata ant build properties. # -# $Id$ ## # Properties for installing bigdata. Many of these properties are both by the @@ -131,32 +130,6 @@ # file is completely open. policyFile=${install.config.dir}/policy/policy.all -# The host that will run the log4j SimpleSocketLogger and the port on which -# the logger will listen. This gets written into the bigdata configuration -# file and the log4j.properties file such that the logger daemon will startup -# on this host and the clients and services will log onto a socket appender -# which logs onto this host. log4j.properties (the file used by the clients -# and services) is setup to log INFO+ onto this service. It will also log -# ERROR+ onto the local console in case the socket logger is down. The socket -# logger is setup in log4jServer.properties. It logs ERROR+ onto the errorLog -# (see below), INFO+ onto the detailLog (see below), and events onto the -# eventLog (see below). -# -# Note: java.util.logging messages DO NOT get written onto this logger -- only -# log4j messages. -# -LOG4J_SOCKET_LOGGER_HOST = localhost -LOG4J_SOCKET_LOGGER_PORT = 4445 - -# The socket logger uses a DailyRollingFileAppender by default and this -# specifies the DatePattern property which determines both when the file -# will be rolled over and the name of the rolled over log file. -# -# Note: You are responsible for pruning old log files! -# -# roll over at midnight. -LOG4J_DATE_PATTERN='.'yyyy-MM-dd'.log' - # The log4j configuration file for the clients and services. This is used # to set the log4j.configuration property. # @@ -164,12 +137,6 @@ # log4j.config=file:${install.config.dir}/logging/log4j.properties -# The log4j configuration file for the SimpleSocketServer. -# -# Note: This is a FILE (not a URL) -# -log4jServer.config=${install.config.dir}/logging/log4jServer.properties - # The java.util.logging configuration file. (Jini uses java.util.logging). # # Note: The java.util.logging system DOES NOT use the simple socket logger. @@ -320,4 +287,4 @@ # The directory in which the performance tests will be run. This directory needs to be # on a volume with a lot of room. The directory may be destroyed (by the test harness) # after the performance tests have run their course. 
-perf.run.dir=/usr/bigdata/runs \ No newline at end of file +perf.run.dir=/usr/bigdata/runs Modified: branches/maven_scaleout/src/main/deploy/legacy/install.xml =================================================================== --- branches/maven_scaleout/src/main/deploy/legacy/install.xml 2010-09-01 22:48:57 UTC (rev 3493) +++ branches/maven_scaleout/src/main/deploy/legacy/install.xml 2010-09-02 13:49:55 UTC (rev 3494) @@ -97,10 +97,6 @@ <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> <replacefilter token="@JINI_CONFIG@" value="${jini.config}" /> <replacefilter token="@POLICY_FILE@" value="${policyFile}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_PORT@" value="${LOG4J_SOCKET_LOGGER_PORT}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_CONFIG@" value="${log4jServer.config}" /> - <replacefilter token="@LOG4J_DATE_PATTERN@" value="${LOG4J_DATE_PATTERN}" /> <replacefilter token="@LOG4J_CONFIG@" value="${log4j.config}" /> <replacefilter token="@LOGGING_CONFIG@" value="${logging.config}" /> <replacefilter token="@ERROR_LOG@" value="${errorLog}" /> @@ -121,7 +117,6 @@ <replacefilter token="@FED@" value="${FED}" /> <replacefilter token="@NAS@" value="${NAS}" /> <replacefilter token="@LAS@" value="${LAS}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> <replacefilter token="@JAVA_HOME@" value="${JAVA_HOME}" /> <replacefilter token="@JINI_CLASS_SERVER_PORT@" value="${JINI_CLASS_SERVER_PORT}" /> <replacefilter token="@LOAD_BALANCER_PORT@" value="${LOAD_BALANCER_PORT}" /> @@ -137,10 +132,6 @@ <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> <replacefilter token="@JINI_CONFIG@" value="${jini.config}" /> <replacefilter token="@POLICY_FILE@" value="${policyFile}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_PORT@" value="${LOG4J_SOCKET_LOGGER_PORT}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_CONFIG@" value="${log4jServer.config}" /> - <replacefilter token="@LOG4J_DATE_PATTERN@" value="${LOG4J_DATE_PATTERN}" /> <replacefilter token="@LOG4J_CONFIG@" value="${log4j.config}" /> <replacefilter token="@LOGGING_CONFIG@" value="${logging.config}" /> <replacefilter token="@ERROR_LOG@" value="${errorLog}" /> Modified: branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataenv =================================================================== --- branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataenv 2010-09-01 22:48:57 UTC (rev 3493) +++ branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataenv 2010-09-02 13:49:55 UTC (rev 3494) @@ -79,25 +79,6 @@ export CLASSPATH="@CLASSPATH@" # -# Java options used to start utility classes. The utility classes -# typically do not have large heap demands. This does NOT include -# the CLASSPATH so it is easier to extend the classpath for 3rd -# party components. -# -# Note: The java options for the services are configured in the main -# configuration file. -# -export JAVA_OPTS="-server -ea \ - -showversion \ - -Dcom.sun.jini.jeri.tcp.useNIO=@USE_NIO@ \ - -Djava.security.policy=${BIGDATA_POLICY} \ - -Dlog4j.configuration=${BIGDATA_LOG4J_CONFIG} \ - -Djava.util.logging.config.file=${BIGDATA_LOGGING_CONFIG} \ - -Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME} \ - -DappHome=@APP_HOME@ \ -" - -# # Other things required by the 'bigdata' script. 
# # $lockFile - The bigdata subsystem lock file. @@ -139,6 +120,25 @@ export BIGDATA_LOGGING_CONFIG=@LOGGING_CONFIG@ # +# Java options used to start utility classes. The utility classes +# typically do not have large heap demands. This does NOT include +# the CLASSPATH so it is easier to extend the classpath for 3rd +# party components. +# +# Note: The java options for the services are configured in the main +# configuration file. +# +export JAVA_OPTS="-server -ea \ + -Xmx512m \ + -showversion \ + -Dcom.sun.jini.jeri.tcp.useNIO=@USE_NIO@ \ + -Djava.security.policy=${BIGDATA_POLICY} \ + -Dlog4j.configuration=${BIGDATA_LOG4J_CONFIG} \ + -Djava.util.logging.config.file=${BIGDATA_LOGGING_CONFIG} \ + -Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_HOME} \ +" + +# # Location of the various log files. # export logDir=@LOG_DIR@ Modified: branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataup =================================================================== --- branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataup 2010-09-01 22:48:57 UTC (rev 3493) +++ branches/maven_scaleout/src/main/deploy/legacy/scripts/bigdataup 2010-09-02 13:49:55 UTC (rev 3494) @@ -58,18 +58,6 @@ fi echo $"`date` : `hostname` : created lock file." -# @todo note the pid and look for it before attempting to start the service. -# -# Note: Explicitly specify a small heap. -if [ "@LOG4J_SOCKET_LOGGER_HOST@" == "`hostname`" ]; then - echo $"`date` : `hostname` : starting log4j server." - java -cp ${CLASSPATH} \ - -Xmx400m \ - org.apache.log4j.net.SimpleSocketServer \ - @LOG4J_SOCKET_LOGGER_PORT@ \ - @LOG4J_SOCKET_LOGGER_CONFIG@& -fi - # Start the services manager on this host. # # Note: This explicitly specifies a small heap for the services manager since Deleted: branches/maven_scaleout/src/main/deploy/legacy/scripts/runLog4jServer.sh =================================================================== --- branches/maven_scaleout/src/main/deploy/legacy/scripts/runLog4jServer.sh 2010-09-01 22:48:57 UTC (rev 3493) +++ branches/maven_scaleout/src/main/deploy/legacy/scripts/runLog4jServer.sh 2010-09-02 13:49:55 UTC (rev 3494) @@ -1,23 +0,0 @@ -#!/bin/bash - -# A server that writes log4j messages onto stdout. -# -# Note: You need a log4j "server" configuration file for this. It -# only needs to specify the appender (where to write the stuff) and -# the layout. You should control what gets logged in the log4j -# configuration file used by the applications generating the log events -# so that you don't spam the network with log events that will not be -# logged by the server. -# -# Note: This can also be started automatically on the configured host -# from the main bigdata configuration file. -# -# Note: JAVA_OPTS is ignored since it specifies the log4j configuration -# and we do not want to use that here. - -source `dirname $0`/bigdataenv - -java -cp ${CLASSPATH} \ - org.apache.log4j.net.SimpleSocketServer \ - @LOG4J_SOCKET_LOGGER_PORT@ \ - @LOG4J_SOCKET_LOGGER_CONFIG@ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
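With the SimpleSocketServer relay gone, each host keeps its own rolling log. For reference, the local analogue of what the removed scripts configured (daily rollover using the old LOG4J_DATE_PATTERN) takes only a few log4j 1.2 API calls. This is a generic sketch, not code from the bigdata install; the file name and layout pattern are illustrative.

import org.apache.log4j.DailyRollingFileAppender;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

/*
 * Minimal sketch: purely local daily-rolling logging with log4j 1.2,
 * replacing the SocketAppender -> SimpleSocketServer relay.
 */
public class LocalLogSetup {

    public static void main(final String[] args) throws Exception {

        final PatternLayout layout =
                new PatternLayout("%d{ISO8601} %-5p [%t] %c - %m%n");

        // Same DatePattern install.properties used to hand to the socket
        // logger: roll at midnight, archiving as detail.log.yyyy-MM-dd.log.
        final DailyRollingFileAppender appender = new DailyRollingFileAppender(
                layout, "detail.log", "'.'yyyy-MM-dd'.log'");

        final Logger root = Logger.getRootLogger();
        root.removeAllAppenders();
        root.addAppender(appender);
        root.setLevel(Level.INFO);

        Logger.getLogger(LocalLogSetup.class).info("local logging configured");
    }
}

Note that DailyRollingFileAppender never deletes archived files, which is why the old install documentation warned that pruning rolled-over logs remains the operator's responsibility.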
From: <tho...@us...> - 2010-09-01 22:49:05
Revision: 3493 http://bigdata.svn.sourceforge.net/bigdata/?rev=3493&view=rev Author: thompsonbry Date: 2010-09-01 22:48:57 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Fixed bug in test harness for verifying solutions. Added a hash-map based distinct solutions operator. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/HashBindingSet.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/DistinctElementFilter.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/aggregation/TestDistinctBindingSets.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/DistinctBindingSetOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/ThreadLocalBufferFactory.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/HashBindingSet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/HashBindingSet.java 2010-09-01 21:16:52 UTC (rev 3492) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/HashBindingSet.java 2010-09-01 22:48:57 UTC (rev 3493) @@ -66,11 +66,32 @@ * * @param src */ - protected HashBindingSet(HashBindingSet src) { + protected HashBindingSet(final HashBindingSet src) { map = new LinkedHashMap<IVariable, IConstant>(src.map); } + + /** + * Copy constructor. + * + * @param src + */ + public HashBindingSet(final IBindingSet src) { + + map = new LinkedHashMap<IVariable, IConstant>(src.size()); + + final Iterator<Map.Entry<IVariable, IConstant>> itr = src.iterator(); + + while (itr.hasNext()) { + + final Map.Entry<IVariable, IConstant> e = itr.next(); + + map.put(e.getKey(), e.getValue()); + + } + + } public boolean isBound(IVariable var) { @@ -119,7 +140,7 @@ public String toString() { - StringBuilder sb = new StringBuilder(); + final StringBuilder sb = new StringBuilder(); sb.append("{ "); Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/DistinctBindingSetOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/DistinctBindingSetOp.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/DistinctBindingSetOp.java 2010-09-01 22:48:57 UTC (rev 3493) @@ -0,0 +1,335 @@ +package com.bigdata.bop.aggregation; + +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.FutureTask; + +import com.bigdata.bop.AbstractPipelineOp; +import com.bigdata.bop.ArrayBindingSet; +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpContext; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstant; +import com.bigdata.bop.IVariable; +import com.bigdata.bop.engine.BOpStats; +import com.bigdata.relation.accesspath.IAsynchronousIterator; +import com.bigdata.relation.accesspath.IBlockingBuffer; + +/** + * A pipelined DISTINCT operator based on a hash table. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: DistinctElementFilter.java 3466 2010-08-27 14:28:04Z + * thompsonbry $ + */ +public class DistinctBindingSetOp extends AbstractPipelineOp<IBindingSet>{ + + /** + * + */ + private static final long serialVersionUID = 1L; + + public interface Annotations extends BOp.Annotations { + + /** + * The initial capacity of the {@link ConcurrentHashMap} used to impose + * the distinct constraint. + * + * @see #DEFAULT_INITIAL_CAPACITY + */ + String INITIAL_CAPACITY = "initialCapacity"; + + int DEFAULT_INITIAL_CAPACITY = 16; + + /** + * The load factor of the {@link ConcurrentHashMap} used to impose + * the distinct constraint. + * + * @see #DEFAULT_LOAD_FACTOR + */ + String LOAD_FACTOR = "loadFactor"; + + float DEFAULT_LOAD_FACTOR = .75f; + + /** + * The concurrency level of the {@link ConcurrentHashMap} used to impose + * the distinct constraint. + * + * @see #DEFAULT_CONCURRENCY_LEVEL + */ + String CONCURRENCY_LEVEL = "concurrencyLevel"; + + int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The variables on which the distinct constraint will be imposed. + * Binding sets with distinct values for the specified variables will be + * passed on. + */ + String VARIABLES = DistinctBindingSetOp.class.getName() + ".variables"; + + } + + /** + * Required deep copy constructor. + */ + public DistinctBindingSetOp(final DistinctBindingSetOp op) { + super(op); + } + + /** + * Required shallow copy constructor. + */ + public DistinctBindingSetOp(final BOp[] args, + final Map<String, Object> annotations) { + + super(args, annotations); + + final IVariable<?>[] vars = getVariables(); + + if (vars == null) + throw new IllegalArgumentException(); + + if (vars.length == 0) + throw new IllegalArgumentException(); + + } + + /** + * @see Annotations#INITIAL_CAPACITY + */ + public int getInitialCapacity() { + + return getProperty(Annotations.INITIAL_CAPACITY, + Annotations.DEFAULT_INITIAL_CAPACITY); + + } + + /** + * @see Annotations#LOAD_FACTOR + */ + public float getLoadFactor() { + + return getProperty(Annotations.LOAD_FACTOR, + Annotations.DEFAULT_LOAD_FACTOR); + + } + + /** + * @see Annotations#CONCURRENCY_LEVEL + */ + public int getConcurrencyLevel() { + + return getProperty(Annotations.CONCURRENCY_LEVEL, + Annotations.DEFAULT_CONCURRENCY_LEVEL); + + } + + /** + * @see Annotations#VARIABLES + */ + public IVariable<?>[] getVariables() { + + return (IVariable<?>[]) annotations.get(Annotations.VARIABLES); + + } + + public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { + + return new FutureTask<Void>(new DistinctTask(this, context)); + + } + + /** + * Wrapper used for the as bound solutions in the {@link ConcurrentHashMap}. + */ + private static class Solution { + private final int hash; + + private final IConstant<?>[] vals; + + public Solution(final IConstant<?>[] vals) { + this.vals = vals; + this.hash = java.util.Arrays.hashCode(vals); + } + + public int hashCode() { + return hash; + } + + public boolean equals(final Object o) { + if (this == o) + return true; + if (!(o instanceof Solution)) { + return false; + } + final Solution t = (Solution) o; + if (vals.length != t.vals.length) + return false; + for (int i = 0; i < vals.length; i++) { + // @todo allow for nulls. + if (vals[i] == t.vals[i]) + continue; + if (vals[i] == null) + return false; + if (!vals[i].equals(t.vals[i])) + return false; + } + return true; + } + } + + /** + * Task executing on the node. 
+ */ + private class DistinctTask implements Callable<Void> { + + private final BOpContext<IBindingSet> context; + + /** + * A concurrent map whose keys are the bindings on the specified + * variables (the keys and the values are the same since the map + * implementation does not allow <code>null</code> values). + */ + private /*final*/ ConcurrentHashMap<Solution, Solution> map; + + /** + * The variables used to impose a distinct constraint. + */ + private final IVariable<?>[] vars; + + DistinctTask(final DistinctBindingSetOp op, + final BOpContext<IBindingSet> context) { + + this.context = context; + + this.vars = op.getVariables(); + + this.map = new ConcurrentHashMap<Solution, Solution>( + getInitialCapacity(), getLoadFactor(), + getConcurrencyLevel()); + + } + + /** + * If the bindings are distinct for the configured variables then return + * those bindings. + * + * @param bset + * The binding set to be filtered. + * + * @return The distinct as bound values -or- <code>null</code> if the + * binding set duplicates a solution which was already accepted. + */ + private IConstant<?>[] accept(final IBindingSet bset) { + + final IConstant<?>[] r = new IConstant<?>[vars.length]; + + for (int i = 0; i < vars.length; i++) { + + if ((r[i] = bset.get(vars[i])) == null) { + + /* + * @todo probably allow nulls, but write a unit test for it. + */ + + throw new RuntimeException("Not bound: " + vars[i]); + + } + + } + + final Solution s = new Solution(r); + + final boolean distinct = map.putIfAbsent(s, s) == null; + + return distinct ? r : null; + + } + + public Void call() throws Exception { + + final BOpStats stats = context.getStats(); + + final IAsynchronousIterator<IBindingSet[]> itr = context + .getSource(); + + final IBlockingBuffer<IBindingSet[]> sink = context.getSink(); + + try { + + while (itr.hasNext()) { + + final IBindingSet[] a = itr.next(); + + stats.chunksIn.increment(); + stats.unitsIn.add(a.length); + + final List<IBindingSet> accepted = new LinkedList<IBindingSet>(); + + int naccepted = 0; + + for (IBindingSet bset : a) { + +// System.err.println("considering: " + bset); + + final IConstant<?>[] vals = accept(bset); + + if (vals != null) { + +// System.err.println("accepted: " +// + Arrays.toString(vals)); + + /* + * @todo This may cause problems since the + * ArrayBindingSet does not allow mutation with + * variables not declared up front. In that case use + * new HashBindingSet( new ArrayBindingSet(...)). + */ + + accepted.add(new ArrayBindingSet(vars, vals)); + + naccepted++; + + } + + } + + if (naccepted > 0) { + + final IBindingSet[] b = accepted + .toArray(new IBindingSet[naccepted]); + +// System.err.println("output: " +// + Arrays.toString(b)); + + sink.add(b); + + stats.unitsOut.add(naccepted); + stats.chunksOut.increment(); + + } + + } + + // done. + return null; + + } finally { + + sink.flush(); + sink.close(); + + // discard the map. 
+ map = null; + + } + + } + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregation/DistinctBindingSetOp.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/DistinctElementFilter.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/DistinctElementFilter.java 2010-09-01 21:16:52 UTC (rev 3492) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/DistinctElementFilter.java 2010-09-01 22:48:57 UTC (rev 3493) @@ -10,8 +10,10 @@ import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IVariable; import com.bigdata.bop.NV; +import com.bigdata.bop.aggregation.DistinctBindingSetOp; import com.bigdata.btree.keys.KeyBuilder; import com.bigdata.rdf.relation.rule.BindingSetSortKeyBuilder; +import com.bigdata.rdf.spo.DistinctSPOIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.relation.rule.eval.IJoinNexus; import com.bigdata.relation.rule.eval.ISolution; @@ -23,7 +25,8 @@ * A DISTINCT operator based on a hash table. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ + * @version $Id: DistinctElementFilter.java 3466 2010-08-27 14:28:04Z + * thompsonbry $ * @param <E> * * @todo could have an implementation backed by a persistent hash map using an @@ -40,7 +43,8 @@ * increase the map concurrency level, etc. * * @todo Reconcile with {@link IChunkConverter}, {@link DistinctFilter} (handles - * solutions) and {@link MergeFilter} (handles comparables). + * solutions) and {@link MergeFilter} (handles comparables), + * {@link DistinctSPOIterator}, {@link DistinctBindingSetOp}, etc. 
*/ public class DistinctElementFilter<E> extends BOpBase @@ -62,14 +66,15 @@ String LOAD_FACTOR = "loadFactor"; String CONCURRENCY_LEVEL = "concurrencyLevel"; - + } - public DistinctElementFilter(final IVariable<?>[] distinctList, final UUID masterUUID) { + public DistinctElementFilter(final IVariable<?>[] distinctList, + final UUID masterUUID) { super(distinctList, NV.asMap(new NV[] { -// new NV(Annotations.QUERY_ID, masterUUID), - // new NV(Annotations.BOP_ID, bopId) + // new NV(Annotations.QUERY_ID, masterUUID), + // new NV(Annotations.BOP_ID, bopId) })); if (masterUUID == null) Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-01 21:16:52 UTC (rev 3492) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-01 22:48:57 UTC (rev 3493) @@ -35,7 +35,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; @@ -54,7 +53,6 @@ import com.bigdata.bop.IVariable; import com.bigdata.bop.engine.BOpStats; import com.bigdata.bop.engine.Haltable; -import com.bigdata.btree.AbstractBTree; import com.bigdata.btree.BytesUtil; import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.counters.CAT; @@ -66,7 +64,6 @@ import com.bigdata.relation.accesspath.IAccessPath; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; -import com.bigdata.relation.accesspath.IBuffer; import com.bigdata.relation.rule.IRule; import com.bigdata.relation.rule.IStarJoin; import com.bigdata.relation.rule.IStarJoin.IStarConstraint; @@ -385,193 +382,8 @@ */ final PipelineJoinStats stats; - /** - * A factory pattern for per-thread objects whose life cycle is tied to - * some container. For example, there may be an instance of this pool - * for a {@link JoinTask} or an {@link AbstractBTree}. The pool can be - * torn down when the container is torn down, which prevents its - * thread-local references from escaping. - * - * @author tho...@us... - * @param <T> - * The generic type of the thread-local object. - * - * @todo There should be two implementations of a common interface or - * abstract base class: one based on a private - * {@link ConcurrentHashMap} and the other on striped locks. The - * advantage of the {@link ConcurrentHashMap} is approximately 3x - * higher concurrency. The advantage of striped locks is that you - * can directly manage the #of buffers when when the threads using - * those buffers is unbounded. However, doing so could lead to - * deadlock since two threads can be hashed onto the same buffer - * object. - */ - abstract public class ThreadLocalFactory<T extends IBuffer<E>, E> { + final private ThreadLocalBufferFactory<AbstractUnsynchronizedArrayBuffer<IBindingSet>, IBindingSet> threadLocalBufferFactory = new ThreadLocalBufferFactory<AbstractUnsynchronizedArrayBuffer<IBindingSet>, IBindingSet>() { - /** - * The thread-local queues. - */ - private final ConcurrentHashMap<Thread, T> map; - - /** - * A list of all objects visible to the caller. This is used to - * ensure that any objects allocated by the factory are visited. 
- * - * <p> - * Note: Since the collection is not thread-safe, synchronization is - * required when adding to the collection and when visiting the - * elements of the collection. - */ - private final LinkedList<T> list = new LinkedList<T>(); - - protected ThreadLocalFactory() { - - this(16/* initialCapacity */, .75f/* loadFactor */, 16/* concurrencyLevel */); - - } - - protected ThreadLocalFactory(final int initialCapacity, - final float loadFactor, final int concurrencyLevel) { - - map = new ConcurrentHashMap<Thread, T>(initialCapacity, - loadFactor, concurrencyLevel); - - } - - /** - * Return the #of thread-local objects. - */ - final public int size() { - - return map.size(); - - } - - /** - * Add the element to the thread-local buffer. - * - * @param e - * An element. - * - * @throws IllegalStateException - * if the factory is asynchronously closed. - */ - public void add(final E e) { - - get().add(e); - - } - - /** - * Return a thread-local buffer - * - * @return The thread-local buffer. - * - * @throws RuntimeException - * if the join is halted. - */ - final private T get() { - final Thread t = Thread.currentThread(); - T tmp = map.get(t); - if (tmp == null) { - if (map.put(t, tmp = initialValue()) != null) { - /* - * Note: Since the key is the thread it is not possible - * for there to be a concurrent put of an entry under - * the same key so we do not have to use putIfAbsent(). - */ - throw new AssertionError(); - } - // Add to list. - synchronized (list) { - list.add(tmp); - } - } - halted(); - return tmp; - } - - /** - * Flush each of the unsynchronized buffers onto their backing - * synchronized buffer. - * - * @throws RuntimeException - * if the join is halted. - */ - public void flush() { - synchronized (list) { - int n = 0; - long m = 0L; - for (T b : list) { - halted(); - // #of elements to be flushed. - final int size = b.size(); - // flush, returning total #of elements written onto this - // buffer. - final long counter = b.flush(); - m += counter; - if (log.isDebugEnabled()) - log.debug("Flushed buffer: size=" + size - + ", counter=" + counter); - } - if (log.isInfoEnabled()) - log.info("Flushed " + n - + " unsynchronized buffers totalling " + m - + " elements"); - } - } - - /** - * Reset each of the synchronized buffers, discarding their buffered - * writes. - * <p> - * Note: This method is used during error processing, therefore it - * DOES NOT check {@link JoinTask#halt}. - */ - public void reset() { - synchronized (list) { - int n = 0; - for (T b : list) { - // #of elements in the buffer before reset(). - final int size = b.size(); - // reset the buffer. - b.reset(); - if (log.isDebugEnabled()) - log.debug("Reset buffer: size=" + size); - } - if (log.isInfoEnabled()) - log.info("Reset " + n + " unsynchronized buffers"); - } - } - - // /** - // * Reset the per-{@link Thread} unsynchronized output buffers - // (used as - // * part of error handling for the {@link JoinTask}). - // */ - // final protected void resetUnsyncBuffers() throws Exception { - // - // final int n = threadLocalBufferFactory.reset(); - // .close(new - // Visitor<AbstractUnsynchronizedArrayBuffer<IBindingSet>>() { - // - // @Override - // public void meet( - // final AbstractUnsynchronizedArrayBuffer<IBindingSet> b) - // throws Exception { - // - // - // } - - /** - * Create and return a new object. 
- */ - abstract protected T initialValue(); - - } - - final private ThreadLocalFactory<AbstractUnsynchronizedArrayBuffer<IBindingSet>, IBindingSet> threadLocalBufferFactory = new ThreadLocalFactory<AbstractUnsynchronizedArrayBuffer<IBindingSet>, IBindingSet>() { - @Override protected AbstractUnsynchronizedArrayBuffer<IBindingSet> initialValue() { @@ -579,6 +391,14 @@ return newUnsyncOutputBuffer(); } + + @Override + protected void halted() { + + JoinTask.this.halted(); + + } + }; public String toString() { Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/ThreadLocalBufferFactory.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/ThreadLocalBufferFactory.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/ThreadLocalBufferFactory.java 2010-09-01 22:48:57 UTC (rev 3493) @@ -0,0 +1,232 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 1, 2010 + */ + +package com.bigdata.bop.join; + +import java.util.LinkedList; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.log4j.Logger; + +import com.bigdata.bop.engine.Haltable; +import com.bigdata.btree.AbstractBTree; +import com.bigdata.relation.accesspath.IBuffer; +import com.bigdata.relation.rule.eval.pipeline.JoinTask; + +/** + * A factory pattern for per-thread objects whose life cycle is tied to some + * container. For example, there may be an instance of this pool for a + * {@link JoinTask} or an {@link AbstractBTree}. The pool can be torn down when + * the container is torn down, which prevents its thread-local references from + * escaping. + * + * @author tho...@us... + * @version $Id$ + * @param <T> + * The generic type of the thread-local object. + * + * @todo There should be two implementations of a common interface or abstract + * base class: one based on a private {@link ConcurrentHashMap} and the + * other on striped locks. The advantage of the {@link ConcurrentHashMap} + * is approximately 3x higher concurrency. The advantage of striped locks + * is that you can directly manage the #of buffers when when the threads + * using those buffers is unbounded. However, doing so could lead to + * deadlock since two threads can be hashed onto the same buffer object. + * + * @todo refactor into our concurrency package? + */ +abstract public class ThreadLocalBufferFactory<T extends IBuffer<E>, E> { + + static private final Logger log = Logger + .getLogger(ThreadLocalBufferFactory.class); + + /** + * The thread-local queues. + */ + private final ConcurrentHashMap<Thread, T> map; + + /** + * A list of all objects visible to the caller. This is used to ensure that + * any objects allocated by the factory are visited. 
+ * + * <p> + * Note: Since the collection is not thread-safe, synchronization is + * required when adding to the collection and when visiting the elements of + * the collection. + */ + private final LinkedList<T> list = new LinkedList<T>(); + + protected ThreadLocalBufferFactory() { + + this(16/* initialCapacity */, .75f/* loadFactor */, 16/* concurrencyLevel */); + + } + + protected ThreadLocalBufferFactory(final int initialCapacity, + final float loadFactor, final int concurrencyLevel) { + + map = new ConcurrentHashMap<Thread, T>(initialCapacity, loadFactor, + concurrencyLevel); + + } + + /** + * Return the #of thread-local objects. + */ + final public int size() { + + return map.size(); + + } + + /** + * Add the element to the thread-local buffer. + * + * @param e + * An element. + * + * @throws IllegalStateException + * if the factory is asynchronously closed. + */ + public void add(final E e) { + + get().add(e); + + } + + /** + * Return a thread-local buffer + * + * @return The thread-local buffer. + * + * @throws RuntimeException + * if the join is halted. + */ + final public T get() { + final Thread t = Thread.currentThread(); + T tmp = map.get(t); + if (tmp == null) { + if (map.put(t, tmp = initialValue()) != null) { + /* + * Note: Since the key is the thread it is not possible for + * there to be a concurrent put of an entry under the same key + * so we do not have to use putIfAbsent(). + */ + throw new AssertionError(); + } + // Add to list. + synchronized (list) { + list.add(tmp); + } + } + halted(); + return tmp; + } + + /** + * Flush each of the unsynchronized buffers onto their backing synchronized + * buffer. + * + * @throws RuntimeException + * if the join is halted. + */ + public void flush() { + synchronized (list) { + int n = 0; + long m = 0L; + for (T b : list) { + halted(); + // #of elements to be flushed. + final int size = b.size(); + // flush, returning total #of elements written onto this + // buffer. + final long counter = b.flush(); + m += counter; + if (log.isDebugEnabled()) + log.debug("Flushed buffer: size=" + size + ", counter=" + + counter); + } + if (log.isInfoEnabled()) + log.info("Flushed " + n + " unsynchronized buffers totalling " + + m + " elements"); + } + } + + /** + * Reset each of the synchronized buffers, discarding their buffered writes. + * <p> + * Note: This method is used during error processing, therefore it DOES NOT + * check {@link JoinTask#halt}. + */ + public void reset() { + synchronized (list) { + int n = 0; + for (T b : list) { + // #of elements in the buffer before reset(). + final int size = b.size(); + // reset the buffer. + b.reset(); + if (log.isDebugEnabled()) + log.debug("Reset buffer: size=" + size); + } + if (log.isInfoEnabled()) + log.info("Reset " + n + " unsynchronized buffers"); + } + } + + // /** + // * Reset the per-{@link Thread} unsynchronized output buffers + // (used as + // * part of error handling for the {@link JoinTask}). + // */ + // final protected void resetUnsyncBuffers() throws Exception { + // + // final int n = threadLocalBufferFactory.reset(); + // .close(new + // Visitor<AbstractUnsynchronizedArrayBuffer<IBindingSet>>() { + // + // @Override + // public void meet( + // final AbstractUnsynchronizedArrayBuffer<IBindingSet> b) + // throws Exception { + // + // + // } + + /** + * Create and return a new object. + */ + abstract protected T initialValue(); + + /** + * Test to see if the process has been halted. 
+ * + * @see Haltable#halted() + */ + abstract protected void halted(); + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/ThreadLocalBufferFactory.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/aggregation/TestDistinctBindingSets.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/aggregation/TestDistinctBindingSets.java 2010-09-01 21:16:52 UTC (rev 3492) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/aggregation/TestDistinctBindingSets.java 2010-09-01 22:48:57 UTC (rev 3493) @@ -27,12 +27,35 @@ package com.bigdata.bop.aggregation; -import com.bigdata.bop.ap.DistinctElementFilter; +import java.util.LinkedList; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.FutureTask; import junit.framework.TestCase2; +import com.bigdata.bop.ArrayBindingSet; +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpContext; +import com.bigdata.bop.Constant; +import com.bigdata.bop.HashBindingSet; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstant; +import com.bigdata.bop.IVariable; +import com.bigdata.bop.NV; +import com.bigdata.bop.Var; +import com.bigdata.bop.engine.BOpStats; +import com.bigdata.bop.engine.TestQueryEngine; +import com.bigdata.journal.BufferMode; +import com.bigdata.journal.ITx; +import com.bigdata.journal.Journal; +import com.bigdata.relation.accesspath.IAsynchronousIterator; +import com.bigdata.relation.accesspath.IBlockingBuffer; +import com.bigdata.relation.accesspath.ThickAsynchronousIterator; + /** - * Unit tests for {@link DistinctElementFilter}. + * Unit tests for {@link DistinctBindingSetOp}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ @@ -52,14 +75,157 @@ super(name); } + @Override + public Properties getProperties() { + + final Properties p = new Properties(super.getProperties()); + + p.setProperty(Journal.Options.BUFFER_MODE, BufferMode.Transient + .toString()); + + return p; + + } + + Journal jnl = null; + + List<IBindingSet> data = null; + + public void setUp() throws Exception { + + jnl = new Journal(getProperties()); + + setUpData(); + + } + /** - * @todo write unit tests for distinct based on purely local evaluation. + * Setup the data. 
+ */ + private void setUpData() { + + final Var<?> x = Var.var("x"); + final Var<?> y = Var.var("y"); + + data = new LinkedList<IBindingSet>(); + IBindingSet bset = null; + { + bset = new HashBindingSet(); + bset.set(x, new Constant<String>("John")); + bset.set(y, new Constant<String>("Mary")); + data.add(bset); + } + { + bset = new HashBindingSet(); + bset.set(x, new Constant<String>("Mary")); + bset.set(y, new Constant<String>("Paul")); + data.add(bset); + } + { + bset = new HashBindingSet(); + bset.set(x, new Constant<String>("Mary")); + bset.set(y, new Constant<String>("Jane")); + data.add(bset); + } + { + bset = new HashBindingSet(); + bset.set(x, new Constant<String>("Paul")); + bset.set(y, new Constant<String>("Leon")); + data.add(bset); + } + { + bset = new HashBindingSet(); + bset.set(x, new Constant<String>("Paul")); + bset.set(y, new Constant<String>("John")); + data.add(bset); + } + { + bset = new HashBindingSet(); + bset.set(x, new Constant<String>("Leon")); + bset.set(y, new Constant<String>("Paul")); + data.add(bset); + } + + } + + public void tearDown() throws Exception { + + if (jnl != null) { + jnl.destroy(); + jnl = null; + } + + // clear reference. + data = null; + + } + + /** + * Unit test for distinct. * - * @todo write unit tests for distinct based on a hash partitioned DISTINCT - * filter. + * @throws ExecutionException + * @throws InterruptedException */ - public void test_something() { - fail("write tests"); + public void test_distinct() throws InterruptedException, ExecutionException { + + final Var<?> x = Var.var("x"); +// final Var<?> y = Var.var("y"); + + final int distinctId = 1; + + final DistinctBindingSetOp query = new DistinctBindingSetOp(new BOp[]{}, + NV.asMap(new NV[]{// + new NV(DistinctBindingSetOp.Annotations.BOP_ID,distinctId),// + new NV(DistinctBindingSetOp.Annotations.VARIABLES,new IVariable[]{x}),// + })); + + // the expected solutions + final IBindingSet[] expected = new IBindingSet[] {// + new ArrayBindingSet(// + new IVariable[] { x },// + new IConstant[] { new Constant<String>("John") }// + ),// + new ArrayBindingSet(// + new IVariable[] { x },// + new IConstant[] { new Constant<String>("Mary") }// + ), new ArrayBindingSet(// + new IVariable[] { x },// + new IConstant[] { new Constant<String>("Paul") }// + ), new ArrayBindingSet(// + new IVariable[] { x },// + new IConstant[] { new Constant<String>("Leon") }// + ), }; + + final BOpStats stats = query.newStats(); + + final IAsynchronousIterator<IBindingSet[]> source = new ThickAsynchronousIterator<IBindingSet[]>( + new IBindingSet[][] { data.toArray(new IBindingSet[0]) }); + + final IBlockingBuffer<IBindingSet[]> sink = query.newBuffer(); + + final BOpContext<IBindingSet> context = new BOpContext<IBindingSet>( + null/* fed */, jnl/* indexManager */, + ITx.READ_COMMITTED/* readTimestamp */, + ITx.UNISOLATED/* writeTimestamp */, -1/* partitionId */, stats, + source, sink, null/* sink2 */); + + // get task. + final FutureTask<Void> ft = query.eval(context); + + // execute task. + jnl.getExecutorService().execute(ft); + + TestQueryEngine.assertSolutions(expected, sink.iterator()); + + assertTrue(ft.isDone()); + assertFalse(ft.isCancelled()); + ft.get(); // verify nothing thrown. 
+ + assertEquals(1L, stats.chunksIn.get()); + assertEquals(6L, stats.unitsIn.get()); + assertEquals(4L, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java 2010-09-01 21:16:52 UTC (rev 3492) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java 2010-09-01 22:48:57 UTC (rev 3493) @@ -56,6 +56,7 @@ import com.bigdata.bop.IConstraint; import com.bigdata.bop.IPredicate; import com.bigdata.bop.NoSuchBOpException; +import com.bigdata.bop.aggregation.DistinctBindingSetOp; import com.bigdata.bop.ap.Predicate; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; @@ -187,10 +188,12 @@ * <p> * This is guarded by the {@link #runningStateLock}. * - * FIXME {@link IConstraint}s for {@link PipelineJoin}, distinct elements - * and other filters for {@link IPredicate}s, conditional routing for - * binding sets in the pipeline (to route around an optional join group - * based on an {@link IConstraint}), and then buffer management for s/o. + * FIXME {@link IConstraint}s for {@link PipelineJoin}, non-distributed + * filters for {@link IPredicate}s, distinct element filter for + * {@link IPredicate} which is capable of distributed operations, + * conditional routing for binding sets in the pipeline (to route around an + * optional join group based on an {@link IConstraint}), SPARQL to BOP + * translation, and then buffer management for s/o. * * @todo SCALEOUT: Life cycle management of the operators and the query * implies both a per-query bop:NodeList map on the query coordinator Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-01 21:16:52 UTC (rev 3492) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-01 22:48:57 UTC (rev 3493) @@ -153,14 +153,20 @@ public void tearDown() throws Exception { - if (queryEngine != null) + if (queryEngine != null) { queryEngine.shutdownNow(); + queryEngine = null; + } - if (bufferService != null) + if (bufferService != null) { bufferService.shutdownNow(); + bufferService = null; + } - if (jnl != null) + if (jnl != null) { jnl.destroy(); + jnl = null; + } } @@ -573,7 +579,7 @@ * @param expected * @param itr */ - protected void assertSolutions(final IBindingSet[] expected, + static public void assertSolutions(final IBindingSet[] expected, final IAsynchronousIterator<IBindingSet[]> itr) { try { int n = 0; @@ -588,9 +594,10 @@ fail("n=" + n + ", expected=" + expected[n] + ", actual=" + e[i]); } + n++; } - n++; } + assertEquals("Wrong number of solutions", expected.length, n); } finally { itr.close(); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java 2010-09-01 21:16:52 UTC (rev 3492) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java 2010-09-01 22:48:57 UTC (rev 3493) @@ -50,6 +50,7 @@ import 
com.bigdata.bop.ap.E; import com.bigdata.bop.ap.Predicate; import com.bigdata.bop.ap.R; +import com.bigdata.bop.engine.TestQueryEngine; import com.bigdata.bop.join.PipelineJoin.PipelineJoinStats; import com.bigdata.journal.BufferMode; import com.bigdata.journal.ITx; @@ -143,8 +144,13 @@ public void tearDown() throws Exception { - if (jnl != null) + if (jnl != null) { + jnl.destroy(); + + jnl = null; + + } } @@ -252,20 +258,22 @@ // execute task. jnl.getExecutorService().execute(ft); - final IAsynchronousIterator<IBindingSet[]> itr = sink.iterator(); - try { - int n = 0; - while (itr.hasNext()) { - final IBindingSet[] chunk = itr.next(); - if (log.isInfoEnabled()) - log.info(n + " : chunkSize=" + chunk.length); - for (int i = 0; i < chunk.length; i++) { - assertTrue(expected[n++].equals(chunk[i])); - } - } - } finally { - itr.close(); - } + TestQueryEngine.assertSolutions(expected, sink.iterator()); +// final IAsynchronousIterator<IBindingSet[]> itr = sink.iterator(); +// try { +// int n = 0; +// while (itr.hasNext()) { +// final IBindingSet[] chunk = itr.next(); +// if (log.isInfoEnabled()) +// log.info(n + " : chunkSize=" + chunk.length); +// for (int i = 0; i < chunk.length; i++) { +// assertTrue(expected[n++].equals(chunk[i])); +// } +// } +// assertEquals(n, expected.length); +// } finally { +// itr.close(); +// } // join task assertEquals(1L, stats.chunksIn.get()); @@ -366,6 +374,7 @@ // assertTrue(expected[n++].equals(chunk[i])); // } // } +// assertEquals(n, expected.length); // } finally { // itr.close(); // } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <fko...@us...> - 2010-09-01 21:16:59
Revision: 3492 http://bigdata.svn.sourceforge.net/bigdata/?rev=3492&view=rev Author: fkoliver Date: 2010-09-01 21:16:52 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Add serviceJoin(Service...) methods to match existing serviceJoin(IService...) methods for smart proxies. Modified Paths: -------------- branches/maven_scaleout/src/main/java/com/bigdata/service/AbstractFederation.java branches/maven_scaleout/src/main/java/com/bigdata/service/DefaultClientDelegate.java branches/maven_scaleout/src/main/java/com/bigdata/service/DefaultServiceFederationDelegate.java branches/maven_scaleout/src/main/java/com/bigdata/service/IFederationDelegate.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/JiniFederation.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/util/ListServices.java branches/maven_scaleout/src/test/java/com/bigdata/resources/AbstractResourceManagerTestCase.java branches/maven_scaleout/src/test/java/com/bigdata/service/TestEventReceiver.java Modified: branches/maven_scaleout/src/main/java/com/bigdata/service/AbstractFederation.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/service/AbstractFederation.java 2010-09-01 20:02:58 UTC (rev 3491) +++ branches/maven_scaleout/src/main/java/com/bigdata/service/AbstractFederation.java 2010-09-01 21:16:52 UTC (rev 3492) @@ -78,7 +78,6 @@ * Abstract base class for {@link IBigdataFederation} implementations. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * @param <T> * The generic type of the client or service. * @@ -947,8 +946,25 @@ /** * Delegated. {@inheritDoc} */ - public void serviceLeave(final UUID serviceUUID) { + public void serviceJoin(final Service service, final UUID serviceUUID) { + if (!isOpen()) return; + + if (log.isInfoEnabled()) { + + log.info("service=" + service + ", serviceUUID" + serviceUUID); + + } + + client.getDelegate().serviceJoin(service, serviceUUID); + + } + + /** + * Delegated. {@inheritDoc} + */ + public void serviceLeave(final UUID serviceUUID) { + if(!isOpen()) return; if(log.isInfoEnabled()) { @@ -1007,7 +1023,6 @@ * before the service can be started. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ protected class StartDeferredTasksTask implements Runnable { @@ -1303,7 +1318,6 @@ * load balancer service. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public static class ReportTask implements Runnable { @@ -1504,7 +1518,6 @@ * Sends events to the load balancer service. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * * FIXME should discard events if too many build up on the client. */ Modified: branches/maven_scaleout/src/main/java/com/bigdata/service/DefaultClientDelegate.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/service/DefaultClientDelegate.java 2010-09-01 20:02:58 UTC (rev 3491) +++ branches/maven_scaleout/src/main/java/com/bigdata/service/DefaultClientDelegate.java 2010-09-01 21:16:52 UTC (rev 3492) @@ -19,7 +19,6 @@ * {@link AbstractClient#setDelegate(IFederationDelegate)}. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class DefaultClientDelegate<T> implements IFederationDelegate<T> { @@ -118,6 +117,11 @@ } /** NOP */ + public void serviceJoin(Service service, UUID serviceUUID) { + + } + + /** NOP */ public void serviceLeave(UUID serviceUUID) { } Modified: branches/maven_scaleout/src/main/java/com/bigdata/service/DefaultServiceFederationDelegate.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/service/DefaultServiceFederationDelegate.java 2010-09-01 20:02:58 UTC (rev 3491) +++ branches/maven_scaleout/src/main/java/com/bigdata/service/DefaultServiceFederationDelegate.java 2010-09-01 21:16:52 UTC (rev 3492) @@ -53,7 +53,6 @@ * service interface reported to the load balancer service. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class DefaultServiceFederationDelegate<T extends AbstractService> implements IFederationDelegate<T> { @@ -145,6 +144,11 @@ } /** NOP */ + public void serviceJoin(Service service, UUID serviceUUID) { + + } + + /** NOP */ public void serviceLeave(UUID serviceUUID) { } Modified: branches/maven_scaleout/src/main/java/com/bigdata/service/IFederationDelegate.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/service/IFederationDelegate.java 2010-09-01 20:02:58 UTC (rev 3491) +++ branches/maven_scaleout/src/main/java/com/bigdata/service/IFederationDelegate.java 2010-09-01 21:16:52 UTC (rev 3492) @@ -39,7 +39,6 @@ * by the {@link AbstractFederation}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * @param <T> * The generic type of the client or service. */ @@ -100,15 +99,27 @@ * Notice that the service has been discovered. This notice will be * generated the first time the service is discovered by a given * {@link IBigdataClient}. - * + * * @param service * The service. * @param serviceUUID * The service {@link UUID}. */ public void serviceJoin(IService service, UUID serviceUUID); - + /** + * Notice that the service has been discovered. This notice will be + * generated the first time the service is discovered by a given + * {@link IBigdataClient}. + * + * @param service + * The service. + * @param serviceUUID + * The service {@link UUID}. + */ + public void serviceJoin(Service service, UUID serviceUUID); + + /** * Notice that the service is no longer available. This notice will be * generated once for a given {@link IBigdataClient} when the service is no * longer available from any of its service registrars. Modified: branches/maven_scaleout/src/main/java/com/bigdata/service/jini/JiniFederation.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/service/jini/JiniFederation.java 2010-09-01 20:02:58 UTC (rev 3491) +++ branches/maven_scaleout/src/main/java/com/bigdata/service/jini/JiniFederation.java 2010-09-01 21:16:52 UTC (rev 3492) @@ -109,12 +109,12 @@ //BTM import com.bigdata.service.LoadBalancer; +import com.bigdata.service.Service; /** * Concrete implementation for Jini. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class JiniFederation<T> extends AbstractDistributedFederation<T> implements DiscoveryListener, ServiceDiscoveryListener { @@ -1260,17 +1260,22 @@ if (serviceItem.service instanceof IService) { -// System.err.println("serviceAdded: "+serviceItem); - final UUID serviceUUID = JiniUtil .serviceID2UUID(serviceItem.serviceID); serviceJoin((IService) serviceItem.service, serviceUUID); + } else if (serviceItem.service instanceof Service) { + + final UUID serviceUUID = JiniUtil + .serviceID2UUID(serviceItem.serviceID); + + serviceJoin((Service) serviceItem.service, serviceUUID); + } else { - log.warn("Not an " + IService.class); - + log.warn("Not an " + IService.class + " or an " + Service.class); + } } @@ -1410,7 +1415,6 @@ * Glue object. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ static private class TaskFuture<T> { @@ -1436,7 +1440,6 @@ * Run as a scheduled task that monitors futures. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ private static class MonitorFuturesTask implements Runnable { Modified: branches/maven_scaleout/src/main/java/com/bigdata/service/jini/util/ListServices.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/service/jini/util/ListServices.java 2010-09-01 20:02:58 UTC (rev 3491) +++ branches/maven_scaleout/src/main/java/com/bigdata/service/jini/util/ListServices.java 2010-09-01 21:16:52 UTC (rev 3492) @@ -59,7 +59,6 @@ * Utility will list the discovered services in federation to which it connects. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class ListServices { @@ -155,7 +154,6 @@ * * @author <a href="mailto:tho...@us...">Bryan * Thompson</a> - * @version $Id$ */ static class DiscoverAndListTask implements Callable<String> { @@ -329,7 +327,12 @@ + "running.\n"); sb.append("Discovered " + registrars.length - + " jini service registrars.\n"); + + " jini service registrars. [ "); + for (ServiceRegistrar registrar : registrars) { + sb.append(registrar.getLocator().toString()); + sb.append(' '); + } + sb.append("]\n"); sb.append("Discovered " + a.length + " services\n"); Modified: branches/maven_scaleout/src/test/java/com/bigdata/resources/AbstractResourceManagerTestCase.java =================================================================== --- branches/maven_scaleout/src/test/java/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-09-01 20:02:58 UTC (rev 3491) +++ branches/maven_scaleout/src/test/java/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-09-01 21:16:52 UTC (rev 3492) @@ -79,13 +79,13 @@ import com.bigdata.util.httpd.AbstractHTTPD; import com.bigdata.service.LoadBalancer; +import com.bigdata.service.Service; /** * Base class for {@link ResourceManager} test suites that can use normal * startup and shutdown. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class AbstractResourceManagerTestCase extends AbstractResourceManagerBootstrapTestCase { @@ -238,7 +238,6 @@ * trying to test. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ protected static class MockMetadataService implements IMetadataService { @@ -410,7 +409,6 @@ * {@link ResourceManager} during the unit tests. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ protected class MockFederation implements IBigdataFederation<MockMetadataService> { @@ -611,6 +609,9 @@ public void serviceJoin(IService service, UUID serviceUUID) { } + public void serviceJoin(Service service, UUID serviceUUID) { + } + public void serviceLeave(UUID serviceUUID) { } Modified: branches/maven_scaleout/src/test/java/com/bigdata/service/TestEventReceiver.java =================================================================== --- branches/maven_scaleout/src/test/java/com/bigdata/service/TestEventReceiver.java 2010-09-01 20:02:58 UTC (rev 3491) +++ branches/maven_scaleout/src/test/java/com/bigdata/service/TestEventReceiver.java 2010-09-01 21:16:52 UTC (rev 3492) @@ -63,7 +63,6 @@ * Unit tests for the {@link EventReceiver}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class TestEventReceiver extends TestCase2 { @@ -85,7 +84,6 @@ * {@link EventReceiver} on the {@link MockFederation}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ static class MyEvent extends Event { @@ -418,7 +416,6 @@ * Generates events. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ private static class EventFactory implements Callable<Void> { @@ -484,7 +481,6 @@ * the events are stored). * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ static private class EventConsumer implements Callable<Void> { @@ -525,7 +521,6 @@ * Mock federation to support the unit tests in the outer class. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ static class MockFederation implements IBigdataFederation<IEventReceivingService> { @@ -706,9 +701,13 @@ } public void serviceJoin(IService service, UUID serviceUUID) { - + } + public void serviceJoin(Service service, UUID serviceUUID) { + + } + public void serviceLeave(UUID serviceUUID) { } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-01 20:03:04
Revision: 3491 http://bigdata.svn.sourceforge.net/bigdata/?rev=3491&view=rev Author: thompsonbry Date: 2010-09-01 20:02:58 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Added comments related to https://sourceforge.net/apps/trac/bigdata/ticket/151 and https://sourceforge.net/apps/trac/bigdata/ticket/152. Reconciled edits from Martyn in RWStrategy. Made freeImmediately() private. Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IRootBlockView.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockCommitter.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IRootBlockView.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IRootBlockView.java 2010-09-01 18:43:58 UTC (rev 3490) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IRootBlockView.java 2010-09-01 20:02:58 UTC (rev 3491) @@ -112,9 +112,17 @@ * The root block version number. */ public int getVersion(); - + /** * The next offset at which a data item would be written on the store. + * + * FIXME The RWStore has different semantics for this field. Document those + * semantics and modify {@link AbstractJournal} so we can directly decide + * how many bytes were "written" (for the WORM) or were "allocated" (for the + * RWStore, in which case it should probably be the net of the bytes + * allocated and released). Update all the locations in the code which rely + * on {@link #getNextOffset()} to compute the #of bytes written onto the + * store. */ public long getNextOffset(); Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2010-09-01 18:43:58 UTC (rev 3490) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2010-09-01 20:02:58 UTC (rev 3491) @@ -253,11 +253,12 @@ throw new IllegalArgumentException(); } - try { - long rwaddr = m_store.alloc(data.array(), nbytes, context); + try { /* FIXME [data] is not always backed by an array, the array may not be visible (read-only), the array offset may not be zero, etc. Try to drive the ByteBuffer into the RWStore.alloc() method instead. 
*/ + if(data.hasArray()&&data.arrayOffset()!=0)throw new AssertionError(); + final long rwaddr = m_store.alloc(data.array(), nbytes, context); data.position(nbytes); // update position to end of buffer - long retaddr = encodeAddr(rwaddr, nbytes); + final long retaddr = encodeAddr(rwaddr, nbytes); return retaddr; } catch (RuntimeException re) { @@ -305,7 +306,7 @@ } public void delete(long addr) { - if (true) delete(addr, null); + delete(addr, null); } /** Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockCommitter.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockCommitter.java 2010-09-01 18:43:58 UTC (rev 3490) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockCommitter.java 2010-09-01 20:02:58 UTC (rev 3491) @@ -38,7 +38,7 @@ public class RootBlockCommitter implements ICommitter { final AbstractJournal journal; - public RootBlockCommitter(AbstractJournal journal) { + public RootBlockCommitter(final AbstractJournal journal) { this.journal = journal; } @@ -46,9 +46,16 @@ * Write the current root block to the Journal and return its address * to be stored in the CommitRecord. */ - public long handleCommit(long commitTime) { - ByteBuffer rbv = journal.getRootBlockView().asReadOnlyBuffer(); - + public long handleCommit(final long commitTime) { + final ByteBuffer rbv = journal.getRootBlockView().asReadOnlyBuffer(); + /* + * FIXME There is an API issue with the RWStore which does not allow + * us to pass in a read-only buffer. Write unit tests for this on + * the core IRawStore test suite and fix the RWStore. Also write + * unit tests when the array backing the ByteBuffer can be accessed + * but has a non-zero array offset (a mutable slice of a ByteBuffer). + */ +// return journal.write(rbv); ByteBuffer bb = ByteBuffer.allocate(rbv.capacity()); for (int i = 0; i < rbv.capacity(); i++) { bb.put(rbv.get()); Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2010-09-01 18:43:58 UTC (rev 3490) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2010-09-01 20:02:58 UTC (rev 3491) @@ -1084,6 +1084,7 @@ * @param sze */ public void free(final long laddr, final int sze, final IAllocationContext context) { +// if (true) return; final int addr = (int) laddr; switch (addr) { @@ -1118,7 +1119,7 @@ } - public void immediateFree(final int addr, final int sze) { + private void immediateFree(final int addr, final int sze) { switch (addr) { case 0: case -1: @@ -2621,7 +2622,7 @@ if (m_transactionService.getActiveCount() == 0) { return aged; - } else { + } else { return aged < earliest ? aged : earliest; } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2010-09-01 18:44:05
Revision: 3490 http://bigdata.svn.sourceforge.net/bigdata/?rev=3490&view=rev Author: martyncutcher Date: 2010-09-01 18:43:58 +0000 (Wed, 01 Sep 2010) Log Message: ----------- refine deferred delete release time Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2010-09-01 18:27:35 UTC (rev 3489) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2010-09-01 18:43:58 UTC (rev 3490) @@ -2616,10 +2616,13 @@ public Long call() throws Exception { long now = System.currentTimeMillis(); + long earliest = m_transactionService.getEarliestTxStartTime(); + long aged = now - m_transactionService.getMinReleaseAge(); + if (m_transactionService.getActiveCount() == 0) { - return now; - } else { - return now - m_transactionService.getMinReleaseAge(); // getEarliestTxStartTime(); + return aged; + } else { + return aged < earliest ? aged : earliest; } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-01 18:27:44
Revision: 3489 http://bigdata.svn.sourceforge.net/bigdata/?rev=3489&view=rev Author: thompsonbry Date: 2010-09-01 18:27:35 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Added life cycle hooks for operator evaluation to the query engine. Modified the operator evaluation context to use the bop annotations for the access path. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestBOpUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/IQueryClient.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/QueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/DuplicateBOpException.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/BOpShard.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/HaltOpMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/PipelineUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/StartOpMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestPipelineUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine2.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-01 18:27:08 UTC (rev 3488) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -152,6 +152,16 @@ * The default timeout for operator evaluation. */ long DEFAULT_TIMEOUT = Long.MAX_VALUE; + + /** + * For hash partitioned operators, this is the set of the member nodes + * for the operator. + * <p> + * This annotation is required for such operators since the set of known + * nodes of a given type (such as all data services) can otherwise + * change at runtime. 
+ */ + String MEMBER_SERVICES = "memberServices"; } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-01 18:27:08 UTC (rev 3488) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -30,29 +30,29 @@ import org.apache.log4j.Logger; import com.bigdata.bop.engine.BOpStats; +import com.bigdata.btree.IIndex; import com.bigdata.btree.ILocalBTreeView; +import com.bigdata.btree.IRangeQuery; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.ITx; import com.bigdata.journal.TimestampUtility; -import com.bigdata.relation.AbstractRelation; import com.bigdata.relation.IRelation; +import com.bigdata.relation.accesspath.AccessPath; import com.bigdata.relation.accesspath.IAccessPath; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.relation.locator.IResourceLocator; import com.bigdata.relation.rule.IRule; import com.bigdata.relation.rule.eval.IJoinNexus; +import com.bigdata.service.DataService; import com.bigdata.service.IBigdataFederation; +import com.bigdata.striterator.IKeyOrder; /** * The evaluation context for the operator (NOT serializable). * * @param <E> * The generic type of the objects processed by the operator. - * - * @todo Make it easy to obtain another {@link BOpContext} in which the source - * or sink are different? E.g., for the evaluation of the right operand in - * a join. */ public class BOpContext<E> { @@ -189,6 +189,9 @@ * @throws IllegalArgumentException * if the <i>indexManager</i> is <code>null</code> * @throws IllegalArgumentException + * if the <i>indexManager</i> is is not a <em>local</em> index + * manager. + * @throws IllegalArgumentException * if the <i>readTimestamp</i> is {@link ITx#UNISOLATED} * (queries may not read on the unisolated indices). * @throws IllegalArgumentException @@ -210,6 +213,16 @@ final IBlockingBuffer<E[]> sink, final IBlockingBuffer<E[]> sink2) { if (indexManager == null) throw new IllegalArgumentException(); + if (indexManager instanceof IBigdataFederation<?>) { + /* + * This is disallowed because the predicate specifies an index + * partition and expects to have access to the local index objects + * for that index partition. + */ + throw new IllegalArgumentException( + "Expecting a local index manager, not: " + + indexManager.getClass().toString()); + } if (readTimestamp == ITx.UNISOLATED) throw new IllegalArgumentException(); if (TimestampUtility.isReadOnly(writeTimestamp)) @@ -263,7 +276,6 @@ } /** - /** * Obtain an access path reading from relation for the specified predicate * (from the tail of some rule). * <p> @@ -282,12 +294,44 @@ * * @return The access path. * - * @todo replaces {@link IJoinNexus#getTailAccessPath(IRelation, IPredicate)}. + * @todo replaces + * {@link IJoinNexus#getTailAccessPath(IRelation, IPredicate)}. 
*/ @SuppressWarnings("unchecked") public IAccessPath<?> getAccessPath(final IRelation<?> relation, final IPredicate<?> predicate) { + if (relation == null) + throw new IllegalArgumentException(); + + if (predicate == null) + throw new IllegalArgumentException(); + + final IKeyOrder keyOrder = relation.getKeyOrder((IPredicate) predicate); + + if (keyOrder == null) + throw new RuntimeException("No access path: " + predicate); + + final int partitionId = predicate.getPartitionId(); + + final int flags = predicate.getProperty( + PipelineOp.Annotations.FLAGS, + PipelineOp.Annotations.DEFAULT_FLAGS) + | (TimestampUtility.isReadOnly(getReadTimestamp()) ? IRangeQuery.READONLY + : 0); + + final int chunkOfChunksCapacity = predicate.getProperty( + PipelineOp.Annotations.CHUNK_OF_CHUNKS_CAPACITY, + PipelineOp.Annotations.DEFAULT_CHUNK_OF_CHUNKS_CAPACITY); + + final int chunkCapacity = predicate.getProperty( + PipelineOp.Annotations.CHUNK_CAPACITY, + PipelineOp.Annotations.DEFAULT_CHUNK_CAPACITY); + + final int fullyBufferedReadThreshold = predicate.getProperty( + PipelineOp.Annotations.FULLY_BUFFERED_READ_THRESHOLD, + PipelineOp.Annotations.DEFAULT_FULLY_BUFFERED_READ_THRESHOLD); + if (predicate.getPartitionId() != -1) { /* @@ -299,18 +343,64 @@ * require a total view of the relation, which is not available * during scale-out pipeline joins. Likewise, the [backchain] * property will be ignored since it is handled by an expander. + * + * @todo Replace this with IRelation#getAccessPathForIndexPartition() */ +// return ((AbstractRelation<?>) relation) +// .getAccessPathForIndexPartition(indexManager, +// (IPredicate) predicate); + /* + * @todo This condition should probably be an error since the expander + * will be ignored. + */ +// if (predicate.getSolutionExpander() != null) +// throw new IllegalArgumentException(); + + final String namespace = relation.getNamespace();//predicate.getOnlyRelationName(); - return ((AbstractRelation<?>) relation) - .getAccessPathForIndexPartition(indexManager, - (IPredicate) predicate); + // The name of the desired index partition. + final String name = DataService.getIndexPartitionName(namespace + + "." + keyOrder.getIndexName(), partitionId); + // MUST be a local index view. + final ILocalBTreeView ndx = (ILocalBTreeView) indexManager + .getIndex(name, readTimestamp); + + return new AccessPath(relation, indexManager, readTimestamp, + predicate, keyOrder, ndx, flags, chunkOfChunksCapacity, + chunkCapacity, fullyBufferedReadThreshold).init(); + } - // Find the best access path for the predicate for that relation. - final IAccessPath<?> accessPath = relation - .getAccessPath((IPredicate) predicate); + /* + * Find the best access path for the predicate for that relation. + * + * @todo Replace this with IRelation#getAccessPath(IPredicate) once the + * bop conversion is done. It is the same logic. + */ + IAccessPath accessPath; + { +// accessPath = relation.getAccessPath((IPredicate) predicate); + + final IIndex ndx = relation.getIndex(keyOrder); + + if (ndx == null) { + + throw new IllegalArgumentException("no index? relation=" + + relation.getNamespace() + ", timestamp=" + + readTimestamp + ", keyOrder=" + keyOrder + ", pred=" + + predicate + ", indexManager=" + getIndexManager()); + + } + + accessPath = new AccessPath((IRelation) relation, indexManager, + readTimestamp, (IPredicate) predicate, + (IKeyOrder) keyOrder, ndx, flags, chunkOfChunksCapacity, + chunkCapacity, fullyBufferedReadThreshold).init(); + + } + /* * @todo No expander's for bops, at least not right now. 
They could be * added in easily enough, which would support additional features for Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2010-09-01 18:27:08 UTC (rev 3488) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -29,8 +29,12 @@ import java.util.Iterator; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.log4j.Logger; + import com.bigdata.bop.BOp.Annotations; import com.bigdata.btree.AbstractNode; @@ -44,12 +48,11 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ - * - * @todo In general recursive traversal iterators do not protect against loops - * in the operator tree, but see {@link #getIndex(BOp)}. */ public class BOpUtility { + private static final Logger log = Logger.getLogger(BOpUtility.class); + /** * Pre-order recursive visitation of the operator tree (arguments only, no * annotations). @@ -361,36 +364,53 @@ * Return an index from the {@link BOp.Annotations#BOP_ID} to the * {@link BOp} for each spanned {@link BOp} (including annotations). * {@link BOp}s without identifiers are not indexed. + * <p> + * {@link BOp}s should form directed acyclic graphs, but this is not + * strictly enforced. The recursive traversal iterators declared by this + * class do not protect against loops in the operator tree. However, + * {@link #getIndex(BOp)} detects and report loops based on duplicate + * {@link Annotations#BOP_ID}s -or- duplicate {@link BOp} references. * * @param op * A {@link BOp}. * * @return The index. * - * @todo define recursive striterator for {@link BOp}s (as top-level method) - * and then layer on an expander for the {@link BOp} annotations. - * Finally, layer in a filter for the presence of the bopId. The - * {@link BOp}s visited by the iterator should be inserted into the - * indexed. [it is an error if there is a duplicate bopId.] + * @throws DuplicateBOpIdException + * if there are two or more {@link BOp}s having the same + * {@link Annotations#BOP_ID}. + * @throws BadBOpIdTypeException + * if the {@link Annotations#BOP_ID} is not an {@link Integer}. + * @throws DuplicateBOpException + * if the same {@link BOp} appears more once in the operator + * tree and it is neither an {@link IVariable} nor an + * {@link IConstant}. 
*/ static public Map<Integer,BOp> getIndex(final BOp op) { final LinkedHashMap<Integer, BOp> map = new LinkedHashMap<Integer, BOp>(); + final LinkedHashSet<BOp> distinct = new LinkedHashSet<BOp>(); final Iterator<BOp> itr = preOrderIteratorWithAnnotations(op); while (itr.hasNext()) { final BOp t = itr.next(); final Object x = t.getProperty(Annotations.BOP_ID); - if (x == null) { - continue; + if (x != null) { + if (!(x instanceof Integer)) { + throw new BadBOpIdTypeException("Must be Integer, not: " + + x.getClass() + ": " + Annotations.BOP_ID); + } + final Integer id = (Integer) t.getProperty(Annotations.BOP_ID); + final BOp conflict = map.put(id, t); + if (conflict != null) + throw new DuplicateBOpIdException("duplicate id=" + id + + " for " + conflict + " and " + t); } - if (!(x instanceof Integer)) { - throw new BadBOpIdTypeException("Must be Integer, not: " - + x.getClass() + ": " + Annotations.BOP_ID); + if (!distinct.add(t) && !(t instanceof IVariableOrConstant<?>)) { + /* + * BOp appears more than once. This is only allowed for + * constants and variables. + */ + throw new DuplicateBOpException(t.toString()); } - final Integer id = (Integer) t.getProperty(Annotations.BOP_ID); - final BOp conflict = map.put(id, t); - if (conflict != null) - throw new DuplicateBOpIdException("duplicate id=" + id + " for " - + conflict + " and " + t); } return map; } Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/DuplicateBOpException.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/DuplicateBOpException.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/DuplicateBOpException.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -0,0 +1,24 @@ +package com.bigdata.bop; + +/** + * Exception thrown when a {@link BOp} appears more than once in an operator + * tree (operator trees must not contain loops and operator instances may not + * appear more than once unless they are an {@link IConstant} or an + * {@link IVariable}). + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: DuplicateBOpIdException.java 3466 2010-08-27 14:28:04Z + * thompsonbry $ + */ +public class DuplicateBOpException extends RuntimeException { + + /** + * @param msg + */ + public DuplicateBOpException(String msg) { + super(msg); + } + + private static final long serialVersionUID = 1L; + +} \ No newline at end of file Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/DuplicateBOpException.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2010-09-01 18:27:08 UTC (rev 3488) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -30,6 +30,8 @@ import java.util.concurrent.FutureTask; import com.bigdata.bop.engine.BOpStats; +import com.bigdata.btree.IRangeQuery; +import com.bigdata.relation.accesspath.AccessPath; import com.bigdata.relation.accesspath.BlockingBuffer; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.relation.accesspath.IBuffer; @@ -55,6 +57,8 @@ * would block (default {@value #DEFAULT_CHUNK_OF_CHUNKS_CAPACITY}). 
* Note that partial chunks may be combined into full chunks whose * nominal capacity is specified by {@link #CHUNK_CAPACITY}. + * + * @see #DEFAULT_CHUNK_OF_CHUNKS_CAPACITY */ String CHUNK_OF_CHUNKS_CAPACITY = PipelineOp.class.getName() + ".chunkOfChunksCapacity"; @@ -69,6 +73,7 @@ * of {@link IBindingSet}s (default {@value #CHUNK_CAPACITY}). Partial * chunks may be automatically combined into full chunks. * + * @see #DEFAULT_CHUNK_CAPACITY * @see #CHUNK_OF_CHUNKS_CAPACITY */ String CHUNK_CAPACITY = PipelineOp.class.getName() + ".chunkCapacity"; @@ -83,6 +88,8 @@ * for another chunk to combine with the current chunk before returning * the current chunk (default {@value #DEFAULT_CHUNK_TIMEOUT}). This may * be ZERO (0) to disable the chunk combiner. + * + * @see #DEFAULT_CHUNK_TIMEOUT */ String CHUNK_TIMEOUT = PipelineOp.class.getName() + ".chunkTimeout"; @@ -93,6 +100,46 @@ */ int DEFAULT_CHUNK_TIMEOUT = 1000; + /** + * If the estimated rangeCount for an {@link AccessPath#iterator()} is + * LTE this threshold then use a fully buffered (synchronous) iterator. + * Otherwise use an asynchronous iterator whose capacity is governed by + * {@link #CHUNK_OF_CHUNKS_CAPACITY}. + * + * @see #DEFAULT_FULLY_BUFFERED_READ_THRESHOLD + */ + String FULLY_BUFFERED_READ_THRESHOLD = PipelineOp.class.getName() + + ".fullyBufferedReadThreshold"; + + /** + * Default for {@link #FULLY_BUFFERED_READ_THRESHOLD}. + * + * @todo try something closer to the branching factor, e.g., 100. + */ + int DEFAULT_FULLY_BUFFERED_READ_THRESHOLD = 1000; + + /** + * Flags for the iterator ({@link IRangeQuery#KEYS}, + * {@link IRangeQuery#VALS}, {@link IRangeQuery#PARALLEL}). + * <p> + * Note: The {@link IRangeQuery#PARALLEL} flag here is an indication + * that the iterator may run in parallel across the index partitions. + * This only effects scale-out and only for simple triple patterns since + * the pipeline join does something different (it runs inside the index + * partition using the local index, not the client's view of a + * distributed index). + * + * @see #DEFAULT_FLAGS + */ + String FLAGS = PipelineOp.class.getName() + ".flags"; + + /** + * The default flags will visit the keys and values of the non-deleted + * tuples and allows parallelism in the iterator (when supported). 
+ */ + final int DEFAULT_FLAGS = IRangeQuery.KEYS | IRangeQuery.VALS + | IRangeQuery.PARALLEL; + } /** Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-01 18:27:08 UTC (rev 3488) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -39,7 +39,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; @@ -59,7 +58,6 @@ import com.bigdata.btree.BytesUtil; import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.counters.CAT; -import com.bigdata.journal.IIndexManager; import com.bigdata.relation.IRelation; import com.bigdata.relation.accesspath.AbstractUnsynchronizedArrayBuffer; import com.bigdata.relation.accesspath.AccessPath; @@ -73,8 +71,6 @@ import com.bigdata.relation.rule.IStarJoin; import com.bigdata.relation.rule.IStarJoin.IStarConstraint; import com.bigdata.relation.rule.eval.ISolution; -import com.bigdata.relation.rule.eval.pipeline.DistributedJoinTask; -import com.bigdata.relation.rule.eval.pipeline.JoinMasterTask; import com.bigdata.service.DataService; import com.bigdata.striterator.IChunkedOrderedIterator; import com.bigdata.striterator.IKeyOrder; @@ -95,30 +91,14 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ * - * @todo There is only one source, even if scale-out, and the {@link JoinTask} - * runs only for the duration of that source. The termination conditions - * for query evaluation are handled outside of the operator - * implementation. - * <p> - * The first join dimension always has a single source - the - * initialBindingSet established by the {@link JoinMasterTask}. Downstream - * join dimensions read from {@link IAsynchronousIterator} (s) from the - * upstream join dimension. When the {@link IIndexManager} allows - * key-range partitions, then the fan-in for the sources may be larger - * than one as there will be one {@link JoinTask} for each index partition - * touched by each join dimension. - * - * @todo provide more control over the access path (fully buffered read - * thresholds). - * - * @todo Do we need to hook the source and sink {@link Future}s? - * * @todo Break the star join logic out into its own join operator. * * @todo Implement operator at a time or mega-chunk pipeline operators for high * volume query. These will differ by running across the entire shard on * the right hand operator using multi-block IO each time they process a * (mega-)chunk of bindings from the left hand operator. + * + * @todo Support SLICE via annotations. */ public class PipelineJoin extends AbstractPipelineOp<IBindingSet> implements BindingSetPipelineOp { @@ -334,12 +314,6 @@ */ private static class JoinTask extends Haltable<Void> implements Callable<Void> { -// /** -// * The federation reference is passed along when we evaluate the -// * {@link #left} operand. -// */ -// final protected IBigdataFederation<?> fed; - /** * The join that is being executed. */ @@ -361,15 +335,15 @@ * the failed joined needs to jump out of a join group rather than * routing directly to the ancestor in the operator tree. * - * @todo Support for the {@link #optionalSink} is not finished. 
When the - * optional target is not simply the direct ancestor in the - * operator tree then we need to have a separate thread local - * buffering in front of the optional sink for the join task. This - * means that we need to use two {@link #threadLocalBufferFactory} - * s, one for the optional path. All of this only matters when the - * binding sets are being routed out of an optional join group. - * When the tails are independent optionals then the target is the - * same as the target for binding sets which do join. + * FIXME Support for the {@link #optionalSink} is not finished. When the + * optional target is not simply the direct ancestor in the operator + * tree then we need to have a separate thread local buffering in front + * of the optional sink for the join task. This means that we need to + * use two {@link #threadLocalBufferFactory} s, one for the optional + * path. All of this only matters when the binding sets are being routed + * out of an optional join group. When the tails are independent + * optionals then the target is the same as the target for binding sets + * which do join. */ final IBlockingBuffer<IBindingSet[]> optionalSink; @@ -406,82 +380,6 @@ */ final protected BOpContext<IBindingSet> context; -// /** -// * Volatile flag is set <code>true</code> if the {@link JoinTask} -// * (including any tasks executing on its behalf) should halt. This flag -// * is monitored by the {@link BindingSetConsumerTask}, the -// * {@link AccessPathTask}, and the {@link ChunkTask}. It is set by any -// * of those tasks if they are interrupted or error out. -// * -// * @todo review handling of this flag. Should an exception always be -// * thrown if the flag is set wrapping the {@link #firstCause}? Are -// * there any cases where the behavior should be different? If not, -// * then replace tests with halt() and encapsulate the logic in -// * that method. -// */ -// volatile protected boolean halt = false; -// -// /** -// * Set by {@link BindingSetConsumerTask}, {@link AccessPathTask}, and -// * {@link ChunkTask} if they throw an error. Tasks are required to use -// * an {@link AtomicReference#compareAndSet(Object, Object)} and must -// * specify <code>null</code> as the expected value. This ensures that -// * only the first cause is recorded by this field. -// */ -// final protected AtomicReference<Throwable> firstCause = new AtomicReference<Throwable>( -// null); -// -// /** -// * Indicate that join processing should halt. This method is written -// * defensively and will not throw anything. -// * -// * @param cause -// * The cause. -// */ -// protected void halt(final Throwable cause) { -// -// halt = true; -// -// final boolean isFirstCause = firstCause.compareAndSet( -// null/* expect */, cause); -// -// if (log.isEnabledFor(Level.WARN)) -// -// try { -// -// if (!InnerCause.isInnerCause(cause, -// InterruptedException.class) -// && !InnerCause.isInnerCause(cause, -// CancellationException.class) -// && !InnerCause.isInnerCause(cause, -// ClosedByInterruptException.class) -// && !InnerCause.isInnerCause(cause, -// RejectedExecutionException.class) -// && !InnerCause.isInnerCause(cause, -// BufferClosedException.class)) { -// -// /* -// * This logs all unexpected causes, not just the first -// * one to be reported for this join task. -// * -// * Note: The master will log the firstCause that it -// * receives as an error. 
-// */ -// -// log.warn("joinOp=" + joinOp + ", isFirstCause=" -// + isFirstCause + " : " -// + cause.getLocalizedMessage(), cause); -// -// } -// -// } catch (Throwable ex) { -// -// // error in logging system - ignore. -// -// } -// -// } - /** * The statistics for this {@link JoinTask}. */ @@ -797,26 +695,9 @@ } catch (Throwable t) { - try { - logCallError(t); - } catch (Throwable t2) { - log.error(t2.getLocalizedMessage(), t2); - } - /* * This is used for processing errors and also if this task is - * interrupted (because a SLICE has been satisfied). - * - * @todo For a SLICE, consider that the query solution buffer - * proxy could return the #of solutions added so far so that we - * can halt each join task on the last join dimension in a - * relatively timely manner producing no more than one chunk too - * many (actually, it might not be that timely since some index - * partitions might not produce any solutions; this suggests - * that the master might need a fatter API than a Future for the - * JoinTask so that it can directly notify the JoinTasks for the - * first predicate and they can propagate that notice downstream - * to their sinks). This will be an issue when fanOut GT ONE. + * interrupted (because the sink has been closed). */ halt(t); @@ -836,13 +717,6 @@ log.error(t2.getLocalizedMessage(), t2); } -// // report join stats _before_ we close our source(s). -// try { -// reportOnce(); -// } catch (Throwable t2) { -// log.error(t2.getLocalizedMessage(), t2); -// } - /* * Close source iterators, which will cause any source JoinTasks * that are still executing to throw a CancellationException @@ -857,61 +731,11 @@ throw new RuntimeException(t); - } finally { - -// // report join stats iff they have not already been reported. -// reportOnce(); - } } /** - * Method is used to log the primary exception thrown by {@link #call()} - * . The default implementation does nothing and the exception will be - * logged by the {@link JoinMasterTask}. However, this method is - * overridden by {@link DistributedJoinTask} so that the exception can - * be logged on the host and {@link DataService} where it originates. - * This appears to be necessary in order to trace back the cause of an - * exception which can otherwise be obscured (or even lost?) in a deeply - * nested RMI stack trace. - * - * @param o - * @param t - */ - protected void logCallError(Throwable t) { - - } - -// /** -// * Method reports {@link JoinStats} to the {@link JoinMasterTask}, but -// * only if they have not already been reported. This "report once" -// * constraint is used to make it safe to invoke during error handling -// * before actions which could cause the source {@link JoinTask}s (and -// * hence the {@link JoinMasterTask}) to terminate. -// */ -// protected void reportOnce() { -// -// if (didReport.compareAndSet(false/* expect */, true/* update */)) { -// -//// try { -//// -////// @todo report statistics to the master. -//// masterProxy.report(stats); -//// -//// } catch (IOException ex) { -//// -//// log.warn("Could not report statistics to the master", ex); -//// -//// } -// -// } -// -// } -// -// private final AtomicBoolean didReport = new AtomicBoolean(false); - - /** * Consume {@link IBindingSet} chunks from the {@link #sink}. 
* * @throws Exception Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestBOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestBOpUtility.java 2010-09-01 18:27:08 UTC (rev 3488) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestBOpUtility.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -416,7 +416,7 @@ } /** - * Unit test for {@link BOpUtility#getIndex(BOp)}. + * Unit test for {@link BOpUtility#getIndex(BOp)} using valid inputs. */ public void test_getIndex() { @@ -489,7 +489,7 @@ /** * Unit test for {@link BOpUtility#getIndex(BOp)} in which we verify that it - * rejects operator trees operator ids which are not {@link Integer}s. + * rejects operator trees with operator ids which are not {@link Integer}s. */ public void test_getIndex_rejectsNonIntegerIds() { @@ -510,6 +510,65 @@ } /** + * Unit test for {@link BOpUtility#getIndex(BOp)} in which we verify that it + * rejects operator trees in which the same {@link BOp} reference appears + * more than once but allows duplicate {@link IVariable}s and + * {@link IConstant}s. + */ + public void test_getIndex_duplicateBOps() { + + final IConstant<Long> c1 = new Constant<Long>(12L); + final IVariable<?> v1 = Var.var("y"); + + /* + * Operator tree with duplicate variable and duplicate constant refs. + */ + { + // root + final BOp root = new BOpBase(new BOp[] { // root args[] + c1, v1 }, NV.asMap(new NV[] {// + new NV(BOp.Annotations.BOP_ID, 4),// + new NV("foo", v1), // duplicate variable. + new NV("bar", c1) // duplicate variable. + })); + + // should be Ok. + final Map<Integer, BOp> map = BOpUtility.getIndex(root); + + assertTrue(root == map.get(4)); + + } + + /* + * Operator tree with duplicate bop which is neither a var nor or a + * constant. + */ + { + + /* + * bop w/o bopId is used to verify correct detection of duplicate + * references. + */ + final BOp op2 = new BOpBase(new BOp[]{}, null/*annotations*/); + + // root + final BOp root = new BOpBase(new BOp[] { // root args[] + op2, op2 }, NV.asMap(new NV[] {// + new NV(BOp.Annotations.BOP_ID, 4),// + })); + + try { + BOpUtility.getIndex(root); + fail("Expecting: " + DuplicateBOpException.class); + } catch (DuplicateBOpException ex) { + if (log.isInfoEnabled()) + log.info("Ignoring expected exception: " + ex); + } + } + + } + + /** * Unit test for {@link BOpUtility#getParent(BOp, BOp)}. */ public void test_getParent() { Added: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/BOpShard.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/BOpShard.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/BOpShard.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -0,0 +1,75 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 1, 2010 + */ + +package com.bigdata.bop.engine; + +/** + * An immutable class capturing the evaluation context of an operator against a + * shard. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class BOpShard { + + public final int bopId; + + public final int shardId; + + public BOpShard(final int bopId, final int shardId) { + + this.bopId = bopId; + + this.shardId = shardId; + + } + + /** + * {@inheritDoc} + * + * @todo verify that this is a decent hash function. + */ + public int hashCode() { + + return (bopId * 31) + shardId; + + } + + public boolean equals(final Object o) { + + if (this == o) + return true; + + if (!(o instanceof BOpShard)) + return false; + + return bopId == ((BOpShard) o).bopId + && shardId == ((BOpShard) o).shardId; + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/BOpShard.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/HaltOpMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/HaltOpMessage.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/HaltOpMessage.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -0,0 +1,135 @@ +package com.bigdata.bop.engine; + +import java.io.Serializable; +import java.util.UUID; + +/** + * A message sent to the {@link IQueryClient} when an operator is done executing + * for some chunk of inputs. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class HaltOpMessage implements Serializable { + + /** + * + */ + private static final long serialVersionUID = 1L; + + /** The identifier of the query. */ + final long queryId; + + /** The identifier of the operator. */ + final int bopId; + + /** + * The index partition identifier against which the operator was + * executing. + */ + final int partitionId; + + /** + * The identifier of the service on which the operator was executing. + */ + final UUID serviceId; + + /** + * * The cause and <code>null</code> if the operator halted normally. + */ + final Throwable cause; + + /** + * The operator identifier for the primary sink -or- <code>null</code> + * if there is no primary sink (for example, if this is the last + * operator in the pipeline). + */ + final Integer sinkId; + + /** + * The number of the {@link BindingSetChunk}s that were output for the + * primary sink. (This information is used for the atomic termination + * decision.) + * <p> + * For a given downstream operator this is ONE (1) for scale-up. For + * scale-out, this is one per index partition over which the + * intermediate results were mapped. + */ + final int sinkChunksOut; + + /** + * The operator identifier for the alternative sink -or- + * <code>null</code> if there is no alternative sink. + */ + final Integer altSinkId; + + /** + * The number of the {@link BindingSetChunk}s that were output for the + * alternative sink. (This information is used for the atomic + * termination decision.) + * <p> + * For a given downstream operator this is ONE (1) for scale-up. 
For + * scale-out, this is one per index partition over which the + * intermediate results were mapped. It is zero if there was no + * alternative sink for the operator. + */ + final int altSinkChunksOut; + + /** + * The statistics for the execution of the bop against the partition on + * the service. + */ + final BOpStats taskStats; + + /** + * @param queryId + * The query identifier. + * @param bopId + * The operator whose execution phase has terminated for a + * specific index partition and input chunk. + * @param partitionId + * The index partition against which the operator was + * executed. + * @param serviceId + * The node which executed the operator. + * @param cause + * <code>null</code> unless execution halted abnormally. + * @param chunksOut + * A map reporting the #of binding set chunks which were + * output for each downstream operator for which at least one + * chunk of output was produced. + * @param taskStats + * The statistics for the execution of that bop on that shard + * and service. + */ + public HaltOpMessage( + // + final long queryId, final int bopId, final int partitionId, + final UUID serviceId, Throwable cause, // + final Integer sinkId, final int sinkChunksOut,// + final Integer altSinkId, final int altSinkChunksOut,// + final BOpStats taskStats) { + + if (altSinkId != null && sinkId == null) { + // The primary sink must be defined if the altSink is defined. + throw new IllegalArgumentException(); + } + + if (sinkId != null && altSinkId != null + && sinkId.intValue() == altSinkId.intValue()) { + // The primary and alternative sink may not be the same operator. + throw new IllegalArgumentException(); + } + + this.queryId = queryId; + this.bopId = bopId; + this.partitionId = partitionId; + this.serviceId = serviceId; + this.cause = cause; + this.sinkId = sinkId; + this.sinkChunksOut = sinkChunksOut; + this.altSinkId = altSinkId; + this.altSinkChunksOut = altSinkChunksOut; + this.taskStats = taskStats; + } +} \ No newline at end of file Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/HaltOpMessage.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/IQueryClient.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/IQueryClient.java 2010-09-01 18:27:08 UTC (rev 3488) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/IQueryClient.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -2,7 +2,6 @@ import java.rmi.Remote; import java.rmi.RemoteException; -import java.util.UUID; import com.bigdata.bop.BOp; @@ -74,51 +73,17 @@ /** * Notify the client that execution has started for some query, operator, * node, and index partition. - * - * @param queryId - * The query identifier. - * @param opId - * The operator identifier. - * @param partitionId - * The index partition identifier. - * @param serviceId - * The node on which the operator will execute. - * @param nchunks - * The #of chunks which form the input to that operator (for the - * atomic termination condition decision). */ - public void startOp(long queryId, int opId, int partitionId, UUID serviceId, final int nchunks) + public void startOp(StartOpMessage msg) throws RemoteException; /** * Notify the client that execution has halted for some query, operator, - * node and index partition. 
If execution halted abnormally, then the cause - * is sent as well. - * - * @param queryId - * The query identifier. - * @param opId - * The operator whose execution phase has terminated for a - * specific index partition and input chunk. - * @param partitionId - * The index partition against which the operator was executed. - * @param serviceId - * The node which executed the operator. - * @param cause - * <code>null</code> unless execution halted abnormally. - * @param nchunks - * The #of chunks which were output by the operator (for the - * atomic termination decision). This is ONE (1) for scale-up. - * For scale-out, this is one per index partition over which the - * intermediate results were mapped. - * @param taskStats - * The statistics for the execution of that bop on that shard and - * service. + * node, shard, and source binding set chunk(s). If execution halted + * abnormally, then the cause is sent as well. */ - public void haltOp(long queryId, int opId, int partitionId, UUID serviceId, - Throwable cause, int nchunks, BOpStats taskStats) - throws RemoteException; - + public void haltOp(HaltOpMessage msg) throws RemoteException; + // /** // * Notify the query controller that a chunk of intermediate results is // * available for the query. Added: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/PipelineUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/PipelineUtility.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/PipelineUtility.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -0,0 +1,156 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 1, 2010 + */ + +package com.bigdata.bop.engine; + +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.log4j.Logger; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpUtility; +import com.bigdata.bop.NoSuchBOpException; + +/** + * Utility methods relevant to pipelined operator evaluation. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class PipelineUtility { + + private static final Logger log = Logger.getLogger(PipelineUtility.class); + + /** + * Return <code>true</code> iff the <i>runningCountMap</i> AND + * <i>availableChunkMap</i> map are ZERO (0) for both the given operator and + * for all operators which proceed the given operator in the tree structure + * of its operands. + * <p> + * Note: The movement of the intermediate binding set chunks forms an + * acyclic directed graph. 
We can decide whether or not a {@link BOp} in the + * query plan can be triggered by the current activity pattern by inspecting + * the {@link BOp} and its operands recursively. If neither the {@link BOp} + * nor any of its operands (recursively) has non-zero activity then the + * {@link BOp} can not be triggered and this method will return + * <code>true</code>. + * + * @param bopId + * The identifier for an operator which appears in the query + * plan. + * @param queryPlan + * The query plan. + * @param queryIndex + * An index for the query plan as constructed by + * {@link BOpUtility#getIndex(BOp)}. + * @param runningCountMap + * A map reporting the #of instances of each operator which are + * currently being evaluated (distinct evaluations are performed + * for each chunk and shard). + * @param availableChunkCountMap + * A map reporting the #of chunks available for each operator in + * the pipeline (we only report chunks for pipeline operators). + * + * @return <code>true</code> iff the {@link BOp} can not be triggered given + * the query plan and the activity map. + * + * @throws IllegalArgumentException + * if any argument is <code>null</code>. + * @throws NoSuchBOpException + * if <i>bopId</i> is not found in the query index. + */ + static public boolean isDone(final int bopId, final BOp queryPlan, + final Map<Integer, BOp> queryIndex, + final Map<Integer, AtomicLong> runningCountMap, + final Map<Integer, AtomicLong> availableChunkCountMap) { + + if (queryPlan == null) + throw new IllegalArgumentException(); + if (queryIndex == null) + throw new IllegalArgumentException(); + if (availableChunkCountMap == null) + throw new IllegalArgumentException(); + + final BOp op = queryIndex.get(bopId); + + if (op == null) + throw new NoSuchBOpException(bopId); + + final Iterator<BOp> itr = BOpUtility.preOrderIterator(op); + + while (itr.hasNext()) { + + final BOp t = itr.next(); + + final Integer id = (Integer) t.getProperty(BOp.Annotations.BOP_ID); + + if (id == null) + continue; + { + + final AtomicLong runningCount = runningCountMap.get(id); + + if (runningCount != null && runningCount.get() != 0) { + + if (log.isInfoEnabled()) + log.info("Operator can be triggered: op=" + op + + ", possible trigger=" + t + " is running."); + + return false; + + } + + } + + { + + final AtomicLong availableChunkCount = availableChunkCountMap + .get(id); + + if (availableChunkCount != null + && availableChunkCount.get() != 0) { + + if (log.isInfoEnabled()) + log.info("Operator can be triggered: op=" + op + + ", possible trigger=" + t + " has " + + availableChunkCount + " chunks available."); + + return false; + + } + + } + + } + + return true; + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/PipelineUtility.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/QueryEngine.java 2010-09-01 18:27:08 UTC (rev 3488) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/QueryEngine.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -517,7 +517,7 @@ public void bufferReady(IQueryClient clientProxy, InetSocketAddress serviceAddr, long queryId, int bopId) { - // TODO SCALEOUT + // @todo SCALEOUT notify peer when a buffer is ready. 
} @@ -538,22 +538,28 @@ return null; } - public void startOp(final long queryId, final int opId, - final int partitionId, final UUID serviceId, final int nchunks) - throws RemoteException { - final RunningQuery q = runningQueries.get(queryId); + public void startOp(final StartOpMessage msg) throws RemoteException { + + final RunningQuery q = runningQueries.get(msg.queryId); + if (q != null) { - q.startOp(opId, partitionId, serviceId, nchunks); + + q.startOp(msg); + } + } - public void haltOp(final long queryId, final int opId, - final int partitionId, final UUID serviceId, final Throwable cause, - final int nchunks, final BOpStats taskStats) throws RemoteException { - final RunningQuery q = runningQueries.get(queryId); + public void haltOp(final HaltOpMessage msg) throws RemoteException { + + final RunningQuery q = runningQueries.get(msg.queryId); + if (q != null) { - q.haltOp(opId, partitionId, serviceId, cause, nchunks, taskStats); + + q.haltOp(msg); + } + } /** Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java 2010-09-01 18:27:08 UTC (rev 3488) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/RunningQuery.java 2010-09-01 18:27:35 UTC (rev 3489) @@ -29,7 +29,10 @@ import java.nio.ByteBuffer; import java.rmi.RemoteException; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.Map; +import java.util.Set; import java.util.UUID; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; @@ -39,8 +42,8 @@ import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import org.apache.log4j.Logger; @@ -50,6 +53,8 @@ import com.bigdata.bop.BOpUtility; import com.bigdata.bop.BindingSetPipelineOp; import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstraint; +import com.bigdata.bop.IPredicate; import com.bigdata.bop.NoSuchBOpException; import com.bigdata.bop.ap.Predicate; import com.bigdata.relation.accesspath.IAsynchronousIterator; @@ -134,6 +139,86 @@ final IBlockingBuffer<IBindingSet[]> queryBuffer; /** + * An index from the {@link BOp.Annotations#BOP_ID} to the {@link BOp}. + */ + private final Map<Integer, BOp> bopIndex; + + /** + * A collection of the currently executing future for operators for this + * query. + */ + private final ConcurrentHashMap<BOpShard, Future<?>> operatorFutures = new ConcurrentHashMap<BOpShard, Future<?>>(); + + /** + * A lock guarding {@link #runningTaskCount}, {@link #availableChunkCount}, + * {@link #availableChunkCountMap}. + */ + private final ReentrantLock runStateLock = new ReentrantLock(); + + /** + * The #of tasks for this query which have started but not yet halted and + * ZERO (0) if this is not the query coordinator. + * <p> + * This is guarded by the {@link #runningStateLock}. + */ + private long runningTaskCount = 0; + + /** + * The #of chunks for this query of which a running task has made available + * but which have not yet been accepted for processing by another task and + * ZERO (0) if this is not the query coordinator. + * <p> + * This is guarded by the {@link #runningStateLock}. 
+ */ + private long availableChunkCount = 0; + + /** + * A map reporting the #of chunks available for each operator in the + * pipeline (we only report chunks for pipeline operators). The total #of + * chunks available for any given operator in the pipeline is reported by + * {@link #availableChunkCount}. + * <p> + * The movement of the intermediate binding set chunks forms an acyclic + * directed graph. This map is used to track the #of chunks available for + * each bop in the pipeline. When a bop has no more incoming chunks, we send + * an asynchronous message to all nodes on which that bop had executed + * informing the {@link QueryEngine} on that node that it should immediately + * release all resources associated with that bop. + * <p> + * This is guarded by the {@link #runningStateLock}. + * + * FIXME {@link IConstraint}s for {@link PipelineJoin}, distinct elements + * and other filters for {@link IPredicate}s, conditional routing for + * binding sets in the pipeline (to route around an optional join group + * based on an {@link IConstraint}), and then buffer management for s/o. + * + * @todo SCALEOUT: Life cycle management of the operators and the query + * implies both a per-query bop:NodeList map on the query coordinator + * identifying the nodes on which the query has been executed and a + * per-query bop:ResourceList map identifying the resources associated + * with the execution of that bop on that node. In fact, this could be + * the same {@link #resourceMap} except that we would lose type + * information about the nature of the resource so it is better to + * have distinct maps for this purpose. + */ + private final Map<Integer/* bopId */, AtomicLong/* availableChunkCount */> availableChunkCountMap = new LinkedHashMap<Integer, AtomicLong>(); + + /** + * A collection reporting on the #of instances of a given {@link BOp} which + * are concurrently executing. + * <p> + * This is guarded by the {@link #runningStateLock}. + */ + private final Map<Integer/*bopId*/, AtomicLong/*runningCount*/> runningCountMap = new LinkedHashMap<Integer, AtomicLong>(); + + /** + * A collection of the operators which have executed at least once. ... [truncated message content] |
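Editor's sketch: the run-state counters introduced above (runningTaskCount, availableChunkCount, and the per-operator maps) feed the atomic termination decision that PipelineUtility.isDone evaluates over the operator tree. The stand-alone model below illustrates that accounting in simplified form. RunStateSketch, upstreamIds, and the decrement-by-one in startOp are illustrative stand-ins, not part of the commit; the real RunningQuery tracks the #of input chunks per start message and guards these counters with runStateLock rather than relying on concurrent maps alone.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Simplified model of the RunningQuery run-state accounting (hypothetical
 * stand-in for illustration; not the committed class).
 */
public class RunStateSketch {

    private final ConcurrentHashMap<Integer, AtomicLong> runningCountMap =
            new ConcurrentHashMap<Integer, AtomicLong>();

    private final ConcurrentHashMap<Integer, AtomicLong> availableChunkCountMap =
            new ConcurrentHashMap<Integer, AtomicLong>();

    private AtomicLong counter(final ConcurrentHashMap<Integer, AtomicLong> map,
            final int bopId) {
        AtomicLong c = map.get(bopId);
        if (c == null) {
            final AtomicLong tmp = map.putIfAbsent(bopId, c = new AtomicLong());
            if (tmp != null)
                c = tmp; // lost the race; use the winner's counter.
        }
        return c;
    }

    /** A task for an operator starts, consuming one available chunk. */
    public void startOp(final int bopId) {
        counter(runningCountMap, bopId).incrementAndGet();
        counter(availableChunkCountMap, bopId).decrementAndGet();
    }

    /** A task halts, possibly having made chunks available to its sink. */
    public void haltOp(final int bopId, final Integer sinkId,
            final int sinkChunksOut) {
        counter(runningCountMap, bopId).decrementAndGet();
        if (sinkId != null)
            counter(availableChunkCountMap, sinkId).addAndGet(sinkChunksOut);
    }

    /**
     * An operator can no longer be triggered when neither it nor any
     * operator upstream of it is running or has chunks available. The
     * upstreamIds would be derived from the operator tree, as
     * PipelineUtility.isDone does with a pre-order iterator.
     */
    public boolean isDone(final int bopId, final int[] upstreamIds) {
        if (isActive(bopId))
            return false;
        for (int id : upstreamIds) {
            if (isActive(id))
                return false;
        }
        return true;
    }

    private boolean isActive(final int bopId) {
        return counter(runningCountMap, bopId).get() != 0
                || counter(availableChunkCountMap, bopId).get() != 0;
    }

}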
From: <mar...@us...> - 2010-09-01 18:27:15
Revision: 3488
          http://bigdata.svn.sourceforge.net/bigdata/?rev=3488&view=rev
Author:   martyncutcher
Date:     2010-09-01 18:27:08 +0000 (Wed, 01 Sep 2010)

Log Message:
-----------
Use aged release time for checkfreeable

Modified Paths:
--------------
    branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java	2010-09-01 17:46:11 UTC (rev 3487)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java	2010-09-01 18:27:08 UTC (rev 3488)
@@ -2615,10 +2615,11 @@
 		Long freeTime = m_transactionService.tryCallWithLock(new Callable<Long>() {
 			public Long call() throws Exception {
+				long now = System.currentTimeMillis();
 				if (m_transactionService.getActiveCount() == 0) {
-					return System.currentTimeMillis();
+					return now;
 				} else {
-					return m_transactionService.getEarliestTxStartTime();
+					return now - m_transactionService.getMinReleaseAge(); // getEarliestTxStartTime();
 				}
 			}
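Editor's sketch: the effect of r3488 above is that the deferred-free horizon is no longer keyed to the earliest active transaction's start time but to a fixed minimum release age behind the current time. A minimal stand-alone model of the new computation follows; TxService is an assumed interface exposing only the two calls visible in the diff, not a bigdata type.

/**
 * Hypothetical stand-in for the transaction service calls used above.
 */
interface TxService {
    int getActiveCount();
    long getMinReleaseAge(); // minimum age, in ms, before a freed allocation may be recycled
}

public class FreeTimeSketch {

    /**
     * Deferred frees with a release time LTE the returned value may be
     * recycled. With no active transactions everything up to "now" is
     * freeable; otherwise only allocations older than the minimum release
     * age are, regardless of when the oldest transaction started.
     */
    static long freeTime(final TxService txService) {
        final long now = System.currentTimeMillis();
        if (txService.getActiveCount() == 0) {
            return now;
        }
        return now - txService.getMinReleaseAge();
    }

}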
From: <mar...@us...> - 2010-09-01 17:46:18
Revision: 3487 http://bigdata.svn.sourceforge.net/bigdata/?rev=3487&view=rev Author: martyncutcher Date: 2010-09-01 17:46:11 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Refactor of IAllocationContext Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/AbstractRawStore.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/IRawStore.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/IStore.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/PSOutputStream.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractMRMWTestCase.java branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestConcurrentTx.java branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestConcurrentUnisolatedIndices.java branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java Added Paths: ----------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/IAllocationContext.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/JournalShadow.java branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/ReplicatedStore.java/ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/ReplicatedStoreService.java/ Removed Paths: ------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAllocationContext.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalShadow.java Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -319,12 +319,12 @@ * * Toggle comment appropriately to activate/deactivate */ -// final long[] addrsUsed = new long[4024 * 1024]; -// int addrsUsedCurs = 0; -// final char[] addrActions = new char[addrsUsed.length]; -// final int[] addrLens = new int[addrsUsed.length]; - final long[] addrsUsed = null; +/* final long[] addrsUsed = new long[4024 * 1024]; int addrsUsedCurs = 0; + final char[] addrActions = new char[addrsUsed.length]; + final int[] addrLens = new int[addrsUsed.length]; +*/ final long[] addrsUsed = null; + int addrsUsedCurs = 0; final char[] addrActions = null; final int[] addrLens = null; Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- 
branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -88,6 +88,7 @@ import com.bigdata.rawstore.WormAddressManager; import com.bigdata.relation.locator.IResourceLocator; import com.bigdata.resources.ResourceManager; +import com.bigdata.rwstore.IAllocationContext; import com.bigdata.service.DataService; import com.bigdata.service.EmbeddedClient; import com.bigdata.service.IBigdataClient; @@ -986,8 +987,6 @@ this._rootBlock = fileMetadata.rootBlock; - setCommitter(DELETEBLOCK, new DeleteBlockCommitter((RWStrategy) _bufferStrategy)); - break; } @@ -2611,11 +2610,11 @@ } public long write(ByteBuffer data, final long oldAddr, IAllocationContext context) { - return _bufferStrategy.write(data, oldAddr, context); + return ((RWStrategy)_bufferStrategy).write(data, oldAddr, context); } public long write(ByteBuffer data, IAllocationContext context) { - return _bufferStrategy.write(data, context); + return ((RWStrategy)_bufferStrategy).write(data, context); } // Note: NOP for WORM. Used by RW for eventual recycle protocol. @@ -2631,12 +2630,12 @@ assertCanWrite(); - _bufferStrategy.delete(addr, context); + ((RWStrategy)_bufferStrategy).delete(addr, context); } public void detachContext(IAllocationContext context) { - _bufferStrategy.detachContext(context); + ((RWStrategy)_bufferStrategy).detachContext(context); } final public long getRootAddr(final int index) { @@ -2771,7 +2770,17 @@ * Register committer to write previous root block */ setCommitter(PREV_ROOTBLOCK, new RootBlockCommitter(this)); + + /** + * If the strategy is a RWStrategy, then register the delete + * block committer to store the deferred deletes for each + * commit record. 
+ */ + if (_bufferStrategy instanceof RWStrategy) + setCommitter(DELETEBLOCK, new DeleteBlockCommitter((RWStrategy) _bufferStrategy)); + + } } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -71,6 +71,7 @@ import com.bigdata.resources.ResourceManager; import com.bigdata.resources.StaleLocatorException; import com.bigdata.resources.StaleLocatorReason; +import com.bigdata.rwstore.IAllocationContext; import com.bigdata.sparse.GlobalRowStoreHelper; import com.bigdata.sparse.SparseRowStore; import com.bigdata.util.InnerCause; @@ -2893,22 +2894,6 @@ return delegate.getRootBlocks(startTime); } - public void delete(long addr, IAllocationContext context) { - throw new UnsupportedOperationException(); - } - - public long write(ByteBuffer data, IAllocationContext context) { - throw new UnsupportedOperationException(); - } - - public long write(ByteBuffer data, long oldAddr, IAllocationContext context) { - throw new UnsupportedOperationException(); - } - - public void detachContext(IAllocationContext context) { - delegate.detachContext(context); - } - } /** Deleted: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAllocationContext.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAllocationContext.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAllocationContext.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -1,41 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package com.bigdata.journal; - -/** - * An IAllocationContext defines a shadow environment which may be - * associated with allocations made during a transaction. 
- * - * @author Martyn Cutcher - * - */ -public interface IAllocationContext extends Comparable { - - /** - * @return the minimum release time for any freed allocations - */ - long minimumReleaseTime(); - -} Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -38,12 +38,13 @@ import com.bigdata.counters.CounterSet; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.relation.locator.IResourceLocator; +import com.bigdata.rwstore.IAllocationContext; import com.bigdata.sparse.SparseRowStore; public class JournalDelegate implements IJournal { - final IJournal delegate; + protected final AbstractJournal delegate; - public JournalDelegate(final IJournal source) { + public JournalDelegate(final AbstractJournal source) { this.delegate = source; } @@ -226,20 +227,4 @@ public TemporaryStore getTempStore() { return delegate.getTempStore(); } - - public void delete(long addr, IAllocationContext context) { - delegate.delete(addr, context); - } - - public long write(ByteBuffer data, IAllocationContext context) { - return delegate.write(data, context); - } - - public long write(ByteBuffer data, long oldAddr, IAllocationContext context) { - return delegate.write(data, oldAddr, context); - } - - public void detachContext(IAllocationContext context) { - delegate.detachContext(context); - } } Deleted: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalShadow.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalShadow.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalShadow.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -1,85 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package com.bigdata.journal; - -import java.nio.ByteBuffer; -import java.util.concurrent.atomic.AtomicLong; - -/** - * A JournalShadow wraps a Journal as a JournalDelegate but provides itself - * as the allocation context to be passed through to any interested - * BufferStrategy. 
- * - * This is the path by which RWStore allocators are provided the context for - * the allocations and deletes made - * - * @author Martyn Cutcher - * - */ -public class JournalShadow extends JournalDelegate implements IAllocationContext { - static AtomicLong s_idCounter = new AtomicLong(23); - int m_id = (int) s_idCounter.incrementAndGet(); - - public JournalShadow(IJournal source) { - super(source); - } - - public long write(ByteBuffer data) { - return delegate.write(data, this); - } - - public long write(ByteBuffer data, long oldAddr) { - return delegate.write(data, oldAddr, this); - } - - public void delete(long oldAddr) { - delegate.delete(oldAddr, this); - } - - public int compareTo(Object o) { - if (o instanceof JournalShadow) { - JournalShadow js = (JournalShadow) o; - return m_id - js.m_id; - } else { - return -1; - } - } - - /** - * TODO: should retrieve from localTransactionService or Journal - * properties - */ - public long minimumReleaseTime() { - return 0; - } - - /** - * Release itself from the wrapped Journal, this unlocks the allocator for - * the RWStore - */ - public void detach() { - delegate.detachContext(this); - } -} Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -42,6 +42,7 @@ import com.bigdata.quorum.Quorum; import com.bigdata.rawstore.AbstractRawStore; import com.bigdata.rawstore.IAddressManager; +import com.bigdata.rwstore.IAllocationContext; import com.bigdata.rwstore.RWStore; import com.bigdata.service.AbstractTransactionService; import com.bigdata.util.ChecksumUtility; Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/AbstractRawStore.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/AbstractRawStore.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/AbstractRawStore.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -30,7 +30,7 @@ import java.nio.ByteBuffer; import com.bigdata.LRUNexus; -import com.bigdata.journal.IAllocationContext; +import com.bigdata.rwstore.IAllocationContext; /** * Abstract base class for {@link IRawStore} implementations. This class uses a Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/IRawStore.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/IRawStore.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/IRawStore.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -37,8 +37,8 @@ import com.bigdata.counters.CounterSet; import com.bigdata.io.IByteArrayBuffer; import com.bigdata.journal.AbstractJournal; -import com.bigdata.journal.IAllocationContext; import com.bigdata.mdi.IResourceMetadata; +import com.bigdata.rwstore.IAllocationContext; /** * <p> @@ -124,8 +124,7 @@ */ public long write(ByteBuffer data); - /** - * Write the data (unisolated). + /** * * @param data * The data. The bytes from the current @@ -135,43 +134,6 @@ * {@link ByteBuffer#limit()} . 
The caller may subsequently * modify the contents of the buffer without changing the state * of the store (i.e., the data are copied into the store). - * - * @param context defines teh shadow AllocationContext from which this call - * was made - * - * @return A long integer formed that encodes both the offset from which the - * data may be read and the #of bytes to be read. See - * {@link IAddressManager}. - * - * @throws IllegalArgumentException - * if <i>data</i> is <code>null</code>. - * @throws IllegalArgumentException - * if <i>data</i> has zero bytes {@link ByteBuffer#remaining()}. - * @throws IllegalStateException - * if the store is not open. - * @throws IllegalStateException - * if the store does not allow writes. - * - * @todo define exception if the maximum extent would be exceeded. - * - * @todo the addresses need to reflect the ascending offset at which the - * data are written, at least for a class of append only store. some - * stores, such as the Journal, also have an offset from the start of - * the file to the start of the data region (in the case of the - * Journal it is used to hold the root blocks). - */ - public long write(ByteBuffer data, IAllocationContext context); - - /** - * - * @param data - * The data. The bytes from the current - * {@link ByteBuffer#position()} to the - * {@link ByteBuffer#limit()} will be written and the - * {@link ByteBuffer#position()} will be advanced to the - * {@link ByteBuffer#limit()} . The caller may subsequently - * modify the contents of the buffer without changing the state - * of the store (i.e., the data are copied into the store). * @param oldAddr as returned from a previous write of the same object, or zero if a new write * * @return A long integer formed that encodes both the offset from which the @@ -180,25 +142,6 @@ */ public long write(ByteBuffer data, long oldAddr); - /** - * - * @param data - * The data. The bytes from the current - * {@link ByteBuffer#position()} to the - * {@link ByteBuffer#limit()} will be written and the - * {@link ByteBuffer#position()} will be advanced to the - * {@link ByteBuffer#limit()} . The caller may subsequently - * modify the contents of the buffer without changing the state - * of the store (i.e., the data are copied into the store). - * @param oldAddr as returned from a previous write of the same object, or zero if a new write - * @param context defines the shadow AllocationContext from which this call is made - * - * @return A long integer formed that encodes both the offset from which the - * data may be read and the #of bytes to be read. See - * {@link IAddressManager}. - */ - public long write(ByteBuffer data, long oldAddr, IAllocationContext context); - /** * Delete the data (unisolated). * <p> @@ -226,48 +169,6 @@ public void delete(long addr); /** - * Delete the data (unisolated). - * <p> - * After this operation subsequent reads on the address MAY fail and the - * caller MUST NOT depend on the ability to read at that address. - * - * @param addr - * A long integer formed using {@link Addr} that encodes both the - * offset at which the data was written and the #of bytes that - * were written. - * - * @param context - * Defines the shadow AllocationContext from which this call is - * made. For RWStore this can be used to immediately free the - * allocation if it can be determined to have orignally have - * been requested from the same context. - * - * @exception IllegalArgumentException - * If the address is known to be invalid (never written or - * deleted). 
Note that the address 0L is always invalid. - * - * It is only applicable in the - * context of a garbage collection strategy. With an append only - * store and with eviction of btrees into index segments there - * is no reason to delete anything on the store - and nothing to - * keep track of the delete. - * - * However, with a Read-Write store it is a requirement, and a void - * implementation is provided for other stores. - */ - public void delete(long addr, IAllocationContext context); - - /** - * - * @param context - * Defines the shadow AllocationContext that may have been used - * to allocate or delete storage. The RWStore assigns - * Allocation areas to specific contexts and these must be - * released for use by others. - */ - public void detachContext(IAllocationContext context); - - /** * Read the data (unisolated). * * @param addr Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -27,7 +27,6 @@ import java.util.ArrayList; import com.bigdata.io.writecache.WriteCacheService; -import com.bigdata.journal.IAllocationContext; /** * Bit maps for an allocator. The allocator is a bit map managed as int[]s. Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -28,7 +28,6 @@ import java.util.ArrayList; import java.util.concurrent.atomic.AtomicLong; -import com.bigdata.journal.IAllocationContext; public interface Allocator extends Comparable { public int getBlockSize(); Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -7,7 +7,6 @@ import java.util.ArrayList; import java.util.concurrent.atomic.AtomicLong; -import com.bigdata.journal.IAllocationContext; import com.bigdata.util.ChecksumUtility; /** Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -30,7 +30,6 @@ import org.apache.log4j.Logger; -import com.bigdata.journal.IAllocationContext; import com.bigdata.util.ChecksumUtility; /** Copied: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/IAllocationContext.java (from rev 3484, branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAllocationContext.java) =================================================================== --- 
branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/IAllocationContext.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/IAllocationContext.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -0,0 +1,36 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.rwstore; + +/** + * An IAllocationContext defines a shadow environment which may be + * associated with allocations made during a transaction. + * + * @author Martyn Cutcher + * + */ +public interface IAllocationContext { + +} Property changes on: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/IAllocationContext.java ___________________________________________________________________ Added: svn:mime-type + text/plain Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/IStore.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/IStore.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/IStore.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -26,7 +26,6 @@ import java.io.File; -import com.bigdata.journal.IAllocationContext; /************************************************************************************************ * The IStore interface provides persistent file-backed storage. Copied: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/JournalShadow.java (from rev 3484, branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalShadow.java) =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/JournalShadow.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/JournalShadow.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -0,0 +1,107 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.rwstore; + +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicLong; + +import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IJournal; +import com.bigdata.journal.JournalDelegate; +import com.bigdata.journal.RWStrategy; + +/** + * A JournalShadow wraps a Journal as a JournalDelegate but provides itself + * as the allocation context to be passed through to any interested + * BufferStrategy. + * + * This is the path by which RWStore allocators are provided the context for + * the allocations and deletes made + * + * @author Martyn Cutcher + * + */ +public class JournalShadow extends JournalDelegate implements IAllocationContext { + static AtomicLong s_idCounter = new AtomicLong(23); + int m_id = (int) s_idCounter.incrementAndGet(); + + private JournalShadow(AbstractJournal source) { + super(source); + } + + public long write(ByteBuffer data) { + return delegate.write(data, this); + } + + public long write(ByteBuffer data, long oldAddr) { + return delegate.write(data, oldAddr, this); + } + + public void delete(long oldAddr) { + delegate.delete(oldAddr, this); + } + + public int compareTo(Object o) { + if (o instanceof JournalShadow) { + JournalShadow js = (JournalShadow) o; + return m_id - js.m_id; + } else { + return -1; + } + } + + /** + * TODO: should retrieve from localTransactionService or Journal + * properties + */ + public long minimumReleaseTime() { + return 0; + } + + /** + * Release itself from the wrapped Journal, this unlocks the allocator for + * the RWStore + */ + public void detach() { + delegate.detachContext(this); + } + + /** + * This factory pattern creates a shadow for a RWStrategy-backed Journal + * to support protected allocations while allowing for deletion and + * re-allocation where possible. If the Journal is not backed by a + * RWStrategy, then the original Journal is returned. 
+ * + * @param journal - the journal to be shadowed + * @return the shadowed journal if necessary + */ + public static IJournal newShadow(AbstractJournal journal) { + if (journal.getBufferStrategy() instanceof RWStrategy) { + return new JournalShadow(journal); + } else { + return journal; + } + } +} Property changes on: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/JournalShadow.java ___________________________________________________________________ Added: svn:mime-type + text/plain Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/PSOutputStream.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/PSOutputStream.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/PSOutputStream.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -33,7 +33,6 @@ import org.apache.log4j.Logger; -import com.bigdata.journal.IAllocationContext; /************************************************************************ * PSOutputStream Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -35,6 +35,7 @@ import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.TreeMap; @@ -53,7 +54,6 @@ import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.CommitRecordIndex; import com.bigdata.journal.ForceEnum; -import com.bigdata.journal.IAllocationContext; import com.bigdata.journal.ICommitRecord; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.JournalTransactionService; @@ -2728,9 +2728,8 @@ m_allocationLock.lock(); try { int addrs = strBuf.readInt(); - System.out.println("Freeing deferred deletes: " + addrs); - while (addrs-- > 0) { + while (addrs-- > 0) { // while (false && addrs-- > 0) { int nxtAddr = strBuf.readInt(); Allocator alloc = getBlock(nxtAddr); @@ -2873,8 +2872,8 @@ } } - private TreeMap<IAllocationContext, ContextAllocation> m_contexts = - new TreeMap<IAllocationContext, ContextAllocation>(); + private HashMap<IAllocationContext, ContextAllocation> m_contexts = + new HashMap<IAllocationContext, ContextAllocation>(); ContextAllocation establishContextAllocation(IAllocationContext context) { ContextAllocation ret = m_contexts.get(context); Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractMRMWTestCase.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractMRMWTestCase.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractMRMWTestCase.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -235,7 +235,7 @@ final long timeout = 20; - final int ntrials = 10000; + final int ntrials = 2000; final int nclients = 20; Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestConcurrentTx.java =================================================================== --- 
branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestConcurrentTx.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestConcurrentTx.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -130,6 +130,8 @@ */ public void test_concurrentClients() throws InterruptedException { + new Float("123.23"); + final Properties properties = getProperties(); final Journal journal = new Journal(properties); @@ -150,14 +152,13 @@ doConcurrentClientTest(// journal, // - 10,// timeout + 30,// timeout 20,// nclients - 1000, // ntrials + 500, // ntrials 3,// keylen 100,// nops .10// abortRate ); - } finally { journal.destroy(); @@ -337,7 +338,15 @@ } } - + + // Now test rootBlocks + int rootBlockCount = 0; + Iterator<IRootBlockView> rbvs = journal.getRootBlocks(10); // cannot use 0 + while (rbvs.hasNext()) { + IRootBlockView rbv = rbvs.next(); + rootBlockCount++; + } + // immediately terminate any tasks that are still running. log.warn("Shutting down now!"); journal.shutdownNow(); @@ -357,6 +366,7 @@ ret.put("naborted",""+naborted); ret.put("ncommitted",""+ncommitted); ret.put("nuncommitted", ""+nuncommitted); + ret.put("rootBlocks found", ""+rootBlockCount); ret.put("elapsed(ms)", ""+elapsed); ret.put("tps", ""+(ncommitted * 1000 / elapsed)); ret.put("bytesWritten", ""+bytesWritten); Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestConcurrentUnisolatedIndices.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestConcurrentUnisolatedIndices.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestConcurrentUnisolatedIndices.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -48,6 +48,7 @@ import com.bigdata.btree.IIndex; import com.bigdata.btree.IndexMetadata; import com.bigdata.rawstore.Bytes; +import com.bigdata.rwstore.JournalShadow; import com.bigdata.test.ExperimentDriver; import com.bigdata.test.ExperimentDriver.IComparisonTest; import com.bigdata.test.ExperimentDriver.Result; @@ -118,13 +119,13 @@ // } doConcurrentClientTest(journal,// - 10,// timeout + 80,// timeout 20,// nresources 1, // minLocks 3, // maxLocks 1000, // ntrials 3, // keyLen - 100, // nops + 1000, // nops 0.02d // failureRate ); @@ -404,76 +405,73 @@ */ public Object doTask() throws Exception { - // the index names on which the writer holds a lock. - final String[] resource = getResource(); - - final IIndex[] indices = new IIndex[resource.length]; - - for (int i = 0; i < resource.length; i++) { + // the index names on which the writer holds a lock. + final String[] resource = getResource(); - indices[i] = getJournal().getIndex(resource[i]); + final IIndex[] indices = new IIndex[resource.length]; - final Thread t = Thread.currentThread(); - - if (btrees.putIfAbsent(indices[i], t) != null) { + for (int i = 0; i < resource.length; i++) { + indices[i] = getJournal().getIndex(resource[i]); - throw new AssertionError( - "Unisolated index already in use: " + resource[i]); + final Thread t = Thread.currentThread(); - } + if (btrees.putIfAbsent(indices[i], t) != null) { - } - - try { + throw new AssertionError("Unisolated index already in use: " + resource[i]); - // Random write operations on the named index(s). 
+ } - for (int i = 0; i < nops; i++) { + } - final IIndex ndx = indices[i % resource.length]; - - final byte[] key = new byte[keyLen]; + try { - r.nextBytes(key); + // Random write operations on the named index(s). - if (r.nextInt(100) > 10) { + for (int i = 0; i < nops; i++) { - byte[] val = new byte[5]; + final IIndex ndx = indices[i % resource.length]; - r.nextBytes(val); + final byte[] key = new byte[keyLen]; - ndx.insert(key, val); + r.nextBytes(key); - } else { + if (r.nextInt(100) > 10) { - ndx.remove(key); + byte[] val = new byte[5]; - } + r.nextBytes(val); - } + ndx.insert(key, val); - if (r.nextDouble() < failureRate) { + } else { - throw new SpuriousException(); + ndx.remove(key); - } + } - return null; - - } finally { + } - for(int i=0; i<resource.length; i++) { + if (r.nextDouble() < failureRate) { - final IIndex ndx = indices[i]; - - if (ndx != null) - btrees.remove(ndx); - - } + throw new SpuriousException(); - } - - } + } + + return null; + + } finally { + + for (int i = 0; i < resource.length; i++) { + + final IIndex ndx = indices[i]; + + if (ndx != null) + btrees.remove(ndx); + + } + + } + } } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2010-09-01 17:41:31 UTC (rev 3486) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2010-09-01 17:46:11 UTC (rev 3487) @@ -44,9 +44,7 @@ import com.bigdata.journal.AbstractRestartSafeTestCase; import com.bigdata.journal.BufferMode; import com.bigdata.journal.DiskOnlyStrategy; -import com.bigdata.journal.IAllocationContext; import com.bigdata.journal.Journal; -import com.bigdata.journal.JournalShadow; import com.bigdata.journal.RWStrategy; import com.bigdata.journal.TestJournalBasics; import com.bigdata.journal.Journal.Options; @@ -939,22 +937,6 @@ } static class DummyAllocationContext implements IAllocationContext { - static int s_id = 23; - - int m_id = s_id++; - - public int compareTo(Object o) { - if (o instanceof DummyAllocationContext) { - return m_id - ((DummyAllocationContext) o).m_id; - } else { - return -1; - } - } - - public long minimumReleaseTime() { - return 0; // indicates immediate release - } - } /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
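A minimal usage sketch of the shadow-journal pattern introduced by r3487 above. This is illustrative, not part of the commit: the buffer-mode property value ("DiskRW") and the payload bytes are assumptions, while JournalShadow.newShadow(), write(ByteBuffer), delete(long) and detach() are taken directly from the diff.

    import java.nio.ByteBuffer;
    import java.util.Properties;

    import com.bigdata.journal.IJournal;
    import com.bigdata.journal.Journal;
    import com.bigdata.journal.Journal.Options;
    import com.bigdata.rwstore.JournalShadow;

    public class ShadowUsageSketch {

        public static void main(final String[] args) {

            // Assumption: select the RWStrategy-backed buffer mode; the
            // exact property value may differ on this branch.
            final Properties properties = new Properties();
            properties.setProperty(Options.BUFFER_MODE, "DiskRW");

            final Journal journal = new Journal(properties);

            try {

                // newShadow() wraps the journal only when its buffer
                // strategy is an RWStrategy; otherwise the original
                // journal is returned unwrapped.
                final IJournal shadow = JournalShadow.newShadow(journal);

                // Writes through the shadow pass the shadow itself as the
                // IAllocationContext, scoping the allocation to this context.
                final long addr = shadow.write(ByteBuffer.wrap(new byte[] { 1, 2, 3 }));

                // Deletes through the shadow are tagged with the same
                // context, so the RWStore can recycle the storage without
                // waiting for a global release time.
                shadow.delete(addr);

                // Release the allocators locked on behalf of this context.
                if (shadow instanceof JournalShadow) {
                    ((JournalShadow) shadow).detach();
                }

            } finally {

                journal.destroy();

            }

        }

    }

The final detach() corresponds to IStore.detachContext(IAllocationContext): the RWStore assigns allocation areas to specific contexts, and detaching releases them for use by others.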
From: <fko...@us...> - 2010-09-01 17:41:37
Revision: 3486 http://bigdata.svn.sourceforge.net/bigdata/?rev=3486&view=rev Author: fkoliver Date: 2010-09-01 17:41:31 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Replace cluster config sections using FileSystemScanner for bulk loading with alternatives (1) using FileServer and FileSystemScannerServer to serve up files from a local non-shared file system to the bulk loader, and (2) using URLListScanner to provide URLs for externally served up files. Replace "clientsTemplate" with "clientServiceCount" as the code which searches for IClientService instances moved from config to java. Modified Paths: -------------- branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataCluster.config branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataCluster16.config branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataStandalone.config Modified: branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataCluster.config =================================================================== --- branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataCluster.config 2010-09-01 16:59:34 UTC (rev 3485) +++ branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataCluster.config 2010-09-01 17:41:31 UTC (rev 3486) @@ -37,6 +37,7 @@ import com.bigdata.rdf.lexicon.LexiconRelation; import com.bigdata.rdf.lexicon.LexiconKeyOrder; import com.bigdata.rawstore.Bytes; +import java.net.URL; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit.*; @@ -1726,17 +1727,34 @@ // When true, a pre-existing job with the same name is deleted first. deleteJob = true; - // Scanner identifies resources to be loaded. - resourceScannerFactory = com.bigdata.service.jini.master.FileSystemScanner.newFactory( - new File("@NAS@/lubm/U10"), // dataDir - //new File("/nas/metrics/lehigh/U10-compressed"), // dataDir - new com.bigdata.rdf.load.RDFFilenameFilter() // optional filename filter. - ); + // ALTERNATIVE 1: Start http servers for the directories containing + // the ontology and the data files: - // The ontology to load (file or directory) when the KB is created. - ontology = new File("@install.lubm.config.dir@/univ-bench.owl"); - //ontology = new File("/nas/metrics/lehigh/univ-bench.owl"); + ontologyDir = new File("/tmp/lubm"); + dataDir = new File("/tmp/lubm/datafiles"); + static dataServer = new com.bigdata.service.jini.master.FileServer( + dataDir, 20, 8702, 20); + static ontologyServer = new com.bigdata.service.jini.master.FileServer( + ontologyDir, 5, 8703, 5); + resourceScannerFactory = + com.bigdata.service.jini.master.FileSystemScannerServer.newFactory( + dataDir, + new com.bigdata.rdf.load.RDFFilenameFilter(), dataServer); + ontology = com.bigdata.service.jini.master.FileServer.getURL( + ontologyServer, "/univ-bench.owl"); + // ALTERNATIVE 2: Supply the data files and ontology from an + // external web server. + +// ontology = new URL("http://stub/lubm/univ-bench.owl"); +// resourceScannerFactory = +// com.bigdata.service.jini.master.URLListScanner.newFactory( +// new URL[] { +// new URL("http://stub/lubm/datafiles/University0_0.owl"), +// new URL("http://stub/lubm/datafiles/University0_1.owl"), +// ... +// }); + // The maximum thread pool size for RDF parser tasks. //parserPoolSize = 5; @@ -1787,7 +1805,7 @@ forceOverflow = false; /* How long the master will wait in milliseconds to discover the services - * that you specify for [servicesTemplates] and [clientsTemplate]. + * that you specify for [servicesTemplates]. 
*/ awaitServicesTimeout = 10000; @@ -1831,25 +1849,8 @@ }; - /* Template for matching the services to which the clients will be - * distributed for execution. Normally you will specify - * IClientService as the interface to be discovered. While it is - * possible to run tasks on an IDataService or even an - * IMetadataService since they both implement IRemoteExecutor, it - * is generally discouraged unless the tasks require explicit - * access to the local index partitions for their execution. - */ - clientsTemplate = new ServicesTemplate( - bigdata.clientServiceCount, // minMatches - new ServiceTemplate( - null, //serviceID - new Class[]{ - com.bigdata.service.IClientService.class - }, - null // attributes - ), - null // filter - ); + // Minimum number of client services for distributed execution. + clientServiceCount = bigdata.clientServiceCount; /* * RDF distributed data loader options. @@ -1890,7 +1891,7 @@ forceOverflow = true; /* How long the master will wait in milliseconds to discover the services - * that you specify for [servicesTemplates] and [clientsTemplate]. + * that you specify for [servicesTemplates]. */ awaitServicesTimeout = 10000; @@ -1934,25 +1935,8 @@ }; - /* Template for matching the services to which the clients will be - * distributed for execution. Normally you will specify - * IClientService as the interface to be discovered. While it is - * possible to run tasks on an IDataService or even an - * IMetadataService since they both implement IRemoteExecutor, it - * is generally discouraged unless the tasks require explicit - * access to the local index partitions for their execution. - */ - clientsTemplate = new ServicesTemplate( - bigdata.clientServiceCount, // minMatches - new ServiceTemplate( - null, //serviceID - new Class[]{ - com.bigdata.service.IClientService.class - }, - null // attributes - ), - null // filter - ); + // Minimum number of client services for distributed execution. + clientServiceCount = bigdata.clientServiceCount; /* * RDF distributed data loader options. @@ -2063,7 +2047,7 @@ } -com.bigdata.service.jini.BroadcastSighup { +com.bigdata.service.jini.util.BroadcastSighup { pushConfig = false; @@ -2127,24 +2111,8 @@ }; - /* Template for matching the services to which the clients will be - * distributed for execution. Normally you will specify - * IClientService as the interface to be discovered. While it is - * possible to run tasks on an IDataService or even an - * IMetadataService since they both implement IRemoteExecutor, it - * is generally discouraged unless the tasks require explicit - * access to the local index partitions for their execution. - */ - clientsTemplate = new ServicesTemplate( - bigdata.clientServiceCount, // minMatches - new ServiceTemplate( - null, //serviceID - new Class[]{ - com.bigdata.service.IClientService.class - }, - null), // attributes - null // filter - ); + // Minimum number of client services for distributed execution. + clientServiceCount = bigdata.clientServiceCount; /* The initial #of index partitions for the scale-out index * (computed as #partitions per data service). 
Choose at least Modified: branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataCluster16.config =================================================================== --- branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataCluster16.config 2010-09-01 16:59:34 UTC (rev 3485) +++ branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataCluster16.config 2010-09-01 17:41:31 UTC (rev 3486) @@ -37,6 +37,7 @@ import com.bigdata.rdf.lexicon.LexiconRelation; import com.bigdata.rdf.lexicon.LexiconKeyOrder; import com.bigdata.rawstore.Bytes; +import java.net.URL; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit.*; @@ -1775,19 +1776,34 @@ // KB properties made visible to JiniFederation#getProperties() properties = lubm.properties; - // Scanner identifies resources to be loaded. - resourceScannerFactory = com.bigdata.service.jini.master.FileSystemScanner.newFactory( - new File("/nas/data/U8000"), // dataDir - //new File("@NAS@/lubm/U10"), // dataDir - new com.bigdata.rdf.load.RDFFilenameFilter() // optional filename filter. - ); + // ALTERNATIVE 1: Start http servers for the directories containing + // the ontology and the data files: - // The ontology to load (file or directory) when the KB is created. - // This is a directory containing the ontology and some pre-generated data sets. - ontology = new File("/nas/data/univ-bench.owl"); - // This is the directory into which the ontology is installed by 'ant lubm-install'. - //ontology = new File("@install.lubm.config.dir@/univ-bench.owl"); + ontologyDir = new File("/tmp/lubm"); + dataDir = new File("/tmp/lubm/datafiles"); + static dataServer = new com.bigdata.service.jini.master.FileServer( + dataDir, 20, 8702, 20); + static ontologyServer = new com.bigdata.service.jini.master.FileServer( + ontologyDir, 5, 8703, 5); + resourceScannerFactory = + com.bigdata.service.jini.master.FileSystemScannerServer.newFactory( + dataDir, + new com.bigdata.rdf.load.RDFFilenameFilter(), dataServer); + ontology = com.bigdata.service.jini.master.FileServer.getURL( + ontologyServer, "/univ-bench.owl"); + // ALTERNATIVE 2: Supply the data files and ontology from an + // external web server. + +// ontology = new URL("http://stub/lubm/univ-bench.owl"); +// resourceScannerFactory = +// com.bigdata.service.jini.master.URLListScanner.newFactory( +// new URL[] { +// new URL("http://stub/lubm/datafiles/University0_0.owl"), +// new URL("http://stub/lubm/datafiles/University0_1.owl"), +// ... +// }); + // The maximum thread pool size for RDF parser tasks. //parserPoolSize = 5; @@ -1838,7 +1854,7 @@ forceOverflow = false; /* How long the master will wait in milliseconds to discover the services - * that you specify for [servicesTemplates] and [clientsTemplate]. + * that you specify for [servicesTemplates]. */ awaitServicesTimeout = 10000; @@ -1882,25 +1898,8 @@ }; - /* Template for matching the services to which the clients will be - * distributed for execution. Normally you will specify - * IClientService as the interface to be discovered. While it is - * possible to run tasks on an IDataService or even an - * IMetadataService since they both implement IRemoteExecutor, it - * is generally discouraged unless the tasks require explicit - * access to the local index partitions for their execution. 
- */ - clientsTemplate = new ServicesTemplate( - bigdata.clientServiceCount, // minMatches - new ServiceTemplate( - null, //serviceID - new Class[]{ - com.bigdata.service.IClientService.class - }, - null // attributes - ), - null // filter - ); + // Minimum number of client services for distributed execution. + clientServiceCount = bigdata.clientServiceCount; /* * RDF distributed data loader options. @@ -1941,7 +1940,7 @@ forceOverflow = true; /* How long the master will wait in milliseconds to discover the services - * that you specify for [servicesTemplates] and [clientsTemplate]. + * that you specify for [servicesTemplates]. */ awaitServicesTimeout = 10000; @@ -1985,25 +1984,8 @@ }; - /* Template for matching the services to which the clients will be - * distributed for execution. Normally you will specify - * IClientService as the interface to be discovered. While it is - * possible to run tasks on an IDataService or even an - * IMetadataService since they both implement IRemoteExecutor, it - * is generally discouraged unless the tasks require explicit - * access to the local index partitions for their execution. - */ - clientsTemplate = new ServicesTemplate( - bigdata.clientServiceCount, // minMatches - new ServiceTemplate( - null, //serviceID - new Class[]{ - com.bigdata.service.IClientService.class - }, - null // attributes - ), - null // filter - ); + // Minimum number of client services for distributed execution. + clientServiceCount = bigdata.clientServiceCount; /* * RDF distributed data loader options. @@ -2181,24 +2163,8 @@ }; - /* Template for matching the services to which the clients will be - * distributed for execution. Normally you will specify - * IClientService as the interface to be discovered. While it is - * possible to run tasks on an IDataService or even an - * IMetadataService since they both implement IRemoteExecutor, it - * is generally discouraged unless the tasks require explicit - * access to the local index partitions for their execution. - */ - clientsTemplate = new ServicesTemplate( - bigdata.clientServiceCount, // minMatches - new ServiceTemplate( - null, //serviceID - new Class[]{ - com.bigdata.service.IClientService.class - }, - null), // attributes - null // filter - ); + // Minimum number of client services for distributed execution. + clientServiceCount = bigdata.clientServiceCount; /* The initial #of index partitions for the scale-out index * (computed as #partitions per data service). Choose at least Modified: branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataStandalone.config =================================================================== --- branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataStandalone.config 2010-09-01 16:59:34 UTC (rev 3485) +++ branches/maven_scaleout/src/main/deploy/var/config/jini/bigdataStandalone.config 2010-09-01 17:41:31 UTC (rev 3486) @@ -37,6 +37,7 @@ import com.bigdata.rdf.lexicon.LexiconRelation; import com.bigdata.rdf.lexicon.LexiconKeyOrder; import com.bigdata.rawstore.Bytes; +import java.net.URL; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit.*; @@ -1740,16 +1741,15 @@ // When true, a pre-existing job with the same name is deleted first. deleteJob = true; - // Scanner identifies resources to be loaded. 
- resourceScannerFactory = com.bigdata.service.jini.master.FileSystemScanner.newFactory( - new File("/nas/data/lubm/U1000"), // dataDir - //new File("/nas/metrics/lehigh/U10-compressed"), // dataDir - new com.bigdata.rdf.load.RDFFilenameFilter() // optional filename filter. + // Scanner identifies resources to be loaded. + resourceScannerFactory = com.bigdata.service.jini.master.URLListScanner.newFactory( + new File("/nas/data/lubm/U1000").toURI().toURL(), // dataDir + new com.bigdata.rdf.load.RDFFilenameFilter() // optional filename filter. ); // The ontology to load (file or directory) when the KB is created. //ontology = new File("@install.lubm.config.dir@/univ-bench.owl"); - ontology = new File("/nas/data/lubm/univ-bench.owl"); + ontology = new File("/nas/data/lubm/univ-bench.owl").toURI().toURL(); // The maximum thread pool size for RDF parser tasks. //parserPoolSize = 5; @@ -1801,7 +1801,7 @@ forceOverflow = false; /* How long the master will wait in milliseconds to discover the services - * that you specify for [servicesTemplates] and [clientsTemplate]. + * that you specify for [servicesTemplates]. */ awaitServicesTimeout = 10000; @@ -1845,25 +1845,8 @@ }; - /* Template for matching the services to which the clients will be - * distributed for execution. Normally you will specify - * IClientService as the interface to be discovered. While it is - * possible to run tasks on an IDataService or even an - * IMetadataService since they both implement IRemoteExecutor, it - * is generally discouraged unless the tasks require explicit - * access to the local index partitions for their execution. - */ - clientsTemplate = new ServicesTemplate( - bigdata.clientServiceCount, // minMatches - new ServiceTemplate( - null, //serviceID - new Class[]{ - com.bigdata.service.IClientService.class - }, - null // attributes - ), - null // filter - ); + // Minimum number of client services for distributed execution. + clientServiceCount = bigdata.clientServiceCount; /* * RDF distributed data loader options. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
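For reference alongside the two alternatives shown inline above, a compact job entry using externally served files (Alternative 2) might look like the following sketch. It is not part of the commit: the component name, host, and file URLs are placeholders, and it assumes "import java.net.URL;" at the top of the config file (which this revision adds); URLListScanner.newFactory(URL[]), ontology, resourceScannerFactory, and clientServiceCount are the entries the change itself introduces.

    /* Hypothetical bulk-load job component; substitute the real component
     * name and URLs for your deployment. */
    com.bigdata.example.BulkLoadJob {

        // Ontology served by an external web server.
        ontology = new URL("http://files.example.com/lubm/univ-bench.owl");

        // Explicit list of data-file URLs for the bulk loader.
        resourceScannerFactory =
            com.bigdata.service.jini.master.URLListScanner.newFactory(
                new URL[] {
                    new URL("http://files.example.com/lubm/University0_0.owl"),
                    new URL("http://files.example.com/lubm/University0_1.owl")
                });

        // Minimum number of client services for distributed execution;
        // replaces the old clientsTemplate ServicesTemplate entry.
        clientServiceCount = 2;

    }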
From: <sgo...@us...> - 2010-09-01 16:59:44
Revision: 3485 http://bigdata.svn.sourceforge.net/bigdata/?rev=3485&view=rev Author: sgossard Date: 2010-09-01 16:59:34 +0000 (Wed, 01 Sep 2010) Log Message: ----------- [maven_scaleout] : Slight restructuring of deployment tarball to bring it more in line with the dev-btm branch. Modified Paths: -------------- branches/maven_scaleout/src/main/assembly/deploy.xml branches/maven_scaleout/src/main/java/com/bigdata/process/ServiceImpl.java Added Paths: ----------- branches/maven_scaleout/src/main/deploy/bin/config/browser-logging.properties branches/maven_scaleout/src/main/deploy/bin/config/disco-logging.properties branches/maven_scaleout/src/main/deploy/legacy/install.properties branches/maven_scaleout/src/main/deploy/legacy/install.xml Removed Paths: ------------- branches/maven_scaleout/src/main/deploy/README branches/maven_scaleout/src/main/deploy/config/ branches/maven_scaleout/src/main/deploy/install.properties branches/maven_scaleout/src/main/deploy/install.xml branches/maven_scaleout/src/main/deploy/var/config/logging/browser-logging.properties branches/maven_scaleout/src/main/deploy/var/config/logging/disco-logging.properties branches/maven_scaleout/src/main/deploy/var/log/ Modified: branches/maven_scaleout/src/main/assembly/deploy.xml =================================================================== --- branches/maven_scaleout/src/main/assembly/deploy.xml 2010-09-01 16:29:16 UTC (rev 3484) +++ branches/maven_scaleout/src/main/assembly/deploy.xml 2010-09-01 16:59:34 UTC (rev 3485) @@ -8,34 +8,54 @@ <format>tar.gz</format> </formats> <fileSets> + <!-- LEGAL DOCUMENTATION --> + <!--<fileSet>--> + <!--<fileMode>0744</fileMode>--> + <!--<directoryMode>0755</directoryMode>--> + <!--<includes>--> + <!--<include>${project.basedir}/README*</include>--> + <!--<include>${project.basedir}/LICENSE*</include>--> + <!--<include>${project.basedir}/NOTICE*</include>--> + <!--<include>${project.basedir}/LEGAL/**/*</include>--> + <!--</includes>--> + <!--<excludes>--> + <!--<exclude>${project.outputDirectory}/**</exclude>--> + <!--</excludes>--> + <!--</fileSet>--> + <fileSet> + <directory>${project.basedir}/src/main/deploy/</directory> + <outputDirectory>/</outputDirectory> + <fileMode>0755</fileMode> + <directoryMode>0755</directoryMode> <includes> - <include>${project.basedir}/README*</include> - <include>${project.basedir}/LICENSE*</include> - <include>${project.basedir}/NOTICE*</include> - <include>${project.basedir}/LEGAL/**/*</include> + <include>bin/**/*</include> + <include>etc/**/*</include> + <include>legacy/bin/**/*</include> + <include>legacy/etc/**/*</include> + <include>legacy/scripts/**/*</include> </includes> <excludes> - <exclude>${project.outputDirectory}/**</exclude> + <exclude>bin/config/**/*</exclude> </excludes> </fileSet> + <fileSet> <directory>${project.basedir}/src/main/deploy/</directory> - <outputDirectory>dist/bigdata</outputDirectory> + <outputDirectory>/</outputDirectory> + <fileMode>0744</fileMode> + <directoryMode>0755</directoryMode> </fileSet> <fileSet> <directory>${project.basedir}/src/test/deploy</directory> - <outputDirectory>dist/bigdata</outputDirectory> + <outputDirectory>/</outputDirectory> + <fileMode>0744</fileMode> + <directoryMode>0755</directoryMode> </fileSet> + - <fileSet><!-- TODO: verify which items need to be in the jar, and which are accessed by file --> - <directory>${project.basedir}/src/main/resources/</directory> - <outputDirectory>dist/bigdata/conf</outputDirectory> - </fileSet> - - <!-- The following filesets are to get the staged dependency 
libraries from the build directory. It would normally be preferable to do the entire thing in this file using dependencySets, but the dependencySet filtering lacked the expressivity to prevent duplicate jars when breaking out runtime,test, and dl libraries. The desired effect @@ -43,17 +63,23 @@ --> <fileSet> <!-- gets all runtime jars except the main bigdata artifact --> <directory>${project.build.directory}/dependencies/lib</directory> - <outputDirectory>dist/bigdata/lib</outputDirectory> + <outputDirectory>lib</outputDirectory> + <fileMode>0744</fileMode> + <directoryMode>0755</directoryMode> </fileSet> <fileSet> <!-- gets the jini dl jars which are downloaded via RMI/JERI codebase, not the bootstrap classpath. --> <directory>${project.build.directory}/dependencies/lib-dl</directory> - <outputDirectory>dist/bigdata/lib-dl</outputDirectory> + <outputDirectory>lib-dl</outputDirectory> + <fileMode>0744</fileMode> + <directoryMode>0755</directoryMode> </fileSet> <fileSet> <!-- gets the test jars that were not already present in the runtime set. --> <directory>${project.build.directory}/dependencies/lib-test</directory> - <outputDirectory>dist/bigdata/testing/lib-test</outputDirectory> + <outputDirectory>testing/lib-test</outputDirectory> + <fileMode>0744</fileMode> + <directoryMode>0755</directoryMode> <excludes> <exclude>**/*dl.jar</exclude> <!-- TODO: couldn't get the dependency copy to exclude the jini dl jars, just runtime --> </excludes> @@ -65,10 +91,12 @@ <dependencySets> <dependencySet><!-- copies just the main bigdata artifact --> <scope>runtime</scope> - <outputDirectory>dist/bigdata/lib</outputDirectory> + <outputDirectory>lib</outputDirectory> <useProjectArtifact>true</useProjectArtifact> <useProjectAttachments>true</useProjectAttachments> <outputFileNameMapping>${artifact.artifactId}${dashClassifier?}.${artifact.extension}</outputFileNameMapping> + <fileMode>0744</fileMode> + <directoryMode>0755</directoryMode> <includes> <include>${project.groupId}:${project.artifactId}:*:jar</include> </includes> @@ -76,10 +104,12 @@ <dependencySet><!-- copies just the jarred bigdata tests --> <scope>test</scope> - <outputDirectory>dist/bigdata/testing/lib-test</outputDirectory> + <outputDirectory>testing/lib-test</outputDirectory> <useProjectArtifact>true</useProjectArtifact> <useProjectAttachments>true</useProjectAttachments> <outputFileNameMapping>${artifact.artifactId}${dashClassifier?}.${artifact.extension}</outputFileNameMapping> + <fileMode>0744</fileMode> + <directoryMode>0755</directoryMode> <includes> <include>${project.groupId}:${project.artifactId}:*:test-jar:*</include> </includes> Deleted: branches/maven_scaleout/src/main/deploy/README =================================================================== --- branches/maven_scaleout/src/main/deploy/README 2010-09-01 16:29:16 UTC (rev 3484) +++ branches/maven_scaleout/src/main/deploy/README 2010-09-01 16:59:34 UTC (rev 3485) @@ -1,4 +0,0 @@ - -Files and directories placed in this "src/main/deploy" directory are copied directly into the deployment artifact under -the path "{buildname}/dist/bigdata/". 
- \ No newline at end of file Copied: branches/maven_scaleout/src/main/deploy/bin/config/browser-logging.properties (from rev 3464, branches/maven_scaleout/src/main/deploy/var/config/logging/browser-logging.properties) =================================================================== --- branches/maven_scaleout/src/main/deploy/bin/config/browser-logging.properties (rev 0) +++ branches/maven_scaleout/src/main/deploy/bin/config/browser-logging.properties 2010-09-01 16:59:34 UTC (rev 3485) @@ -0,0 +1,18 @@ + +# log4j setup +log4j.rootLogger=INFO, consoleAppender +log4j.appender.consoleAppender=org.apache.log4j.ConsoleAppender +log4j.appender.consoleAppender.target=System.out +log4j.appender.consoleAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.consoleAppender.layout.ConversionPattern=%d [%t] %m%n + +# java.util.logging setup +.level=INFO +handlers=java.util.logging.ConsoleHandler +java.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter +java.util.logging.ConsoleHandler.level=FINEST +com.sun.jini.logging.interval=60000 + +#com.sun.jini.level=FINEST +#com.sun.jini.example.browser.level=FINEST + Property changes on: branches/maven_scaleout/src/main/deploy/bin/config/browser-logging.properties ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Copied: branches/maven_scaleout/src/main/deploy/bin/config/disco-logging.properties (from rev 3464, branches/maven_scaleout/src/main/deploy/var/config/logging/disco-logging.properties) =================================================================== --- branches/maven_scaleout/src/main/deploy/bin/config/disco-logging.properties (rev 0) +++ branches/maven_scaleout/src/main/deploy/bin/config/disco-logging.properties 2010-09-01 16:59:34 UTC (rev 3485) @@ -0,0 +1,25 @@ +###################################################################### +# Log4j configuration file for the discovery tool +###################################################################### + +# FATAL +# ERROR +# WARN +# INFO +# DEBUG +# TRACE + +# All messages are directed to stderr. + +# log4j setup +log4j.rootLogger=WARN, Console +log4j.appender.Console=org.apache.log4j.ConsoleAppender +log4j.appender.Console.target=System.err +log4j.appender.Console.layout=org.apache.log4j.PatternLayout +log4j.appender.Console.layout.ConversionPattern=%d [%t] %-5p %c - %m%n + +# java.util.logging setup +.level=WARNING +handlers=com.bigdata.util.config.Log4jLoggingHandler +com.sun.jini.logging.interval=60000 + Property changes on: branches/maven_scaleout/src/main/deploy/bin/config/disco-logging.properties ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Deleted: branches/maven_scaleout/src/main/deploy/install.properties =================================================================== --- branches/maven_scaleout/src/main/deploy/install.properties 2010-09-01 16:29:16 UTC (rev 3484) +++ branches/maven_scaleout/src/main/deploy/install.properties 2010-09-01 16:59:34 UTC (rev 3485) @@ -1,323 +0,0 @@ -# Bigdata ant build properties. -# -# $Id$ - -## -# Properties for installing bigdata. Many of these properties are both by the -# 'ant install' target and also wind up substituted into the 'bigdataenv' script, -# the main bigdata 'configuration' file, and the logger configuration files. -## - -# The name of the bigdata federation instance. 
-# This field should be globally unique, to prevent multiple clusters from finding and talking to one another. -FED=your_cluster_name - -# Bigdata-specific directory on a shared volume accessible by all hosts in the -# cluster. -# -# Note: You can create the appropriate permissions by creating the directory -# ahead of time and doing chown to set the user and group and then chmod to give -# the group read/write permissions. -NAS=/var/bigdata/nas - -# Bigdata-specific directory on a local volume. Each host in the cluster will -# place the persistent state for the bigdata services running on that host within -# this directory. The user which will execute bigdata MUST be able to read/write -# files on this path on each host. Therefore, if you are not installing as root -# this will need to be a file within the user's home directory or some directory -# which exists on each host and is writable by that user. -LAS=/var/bigdata/las -#LAS=~/bigdata/${FED} - -# The location of the installed JDK that will be used to build / run bigdata. -# -# Note: For Sun JVMs, 6u14 with compressed pointers is recommended for 64-bit -# machines having no more than 32G of RAM. Also, note that I have see core -# dumps with Sun jdk1.6.0_07 and FC6 on a 32-bit platform. -# -#JAVA_HOME=C:\\Program Files\\Java\\jdk1.6.0_10 -JAVA_HOME=/usr/java/default -#JAVA_HOME=/usr/lib/jvm/java-1.6.0-openjdk-1.6.0.0.x86_64 -#JAVA_HOME=/usr/java/jrockit-R27.3.0-jdk1.6.0_01 - -# The port on which the jini class server will be running. This class server -# is started on whichever host(s) are configured to run jini. It is part of -# the set of jini core services which includes reggie, etc. It is NOT setup -# to expose any classes except those found in the JARs of the jini/lib-dl -# directory. -JINI_CLASS_SERVER_PORT=9091 - -# The port on which the load balancer expose an httpd service which makes -# available the aggregated performance counters, events, and index dumps. -LOAD_BALANCER_PORT=9090 - -# Where the sysstat utilities are found (performance counter reporting for un*x). -#SYSSTAT_HOME=/usr/local/bin -SYSSTAT_HOME=/usr/bin - -# Specifies the value of com.sun.jini.jeri.tcp.useNIO. When true, use NIO for RMI. -USE_NIO=false - -# Where to install the scripts (must by readable by all hosts). -install.bin.dir=${NAS}/bin - -# Where to install the documentation. -install.doc.dir=${NAS}/doc - -# Where to install the JARs (must be readable by all hosts). -install.lib.dir=${NAS}/lib -install.lib-dl.dir=${NAS}/lib-dl - -# Where to install the configuration files (must be readable by all hosts). -install.config.dir=${NAS}/var/config - -# Where to write the log files. -install.log.dir=${NAS}/log - -# Where to write the deployment distribution. -install.dist.dir=${NAS}/dist - -# For appHome system property to set on ServicesManagerService VM -APP_HOME=${NAS} - -# The 'install-as' user (defaults to the user running the installer). -# -# @TODO ant lacks sufficient mechanisms to set the user (chown). -# -# @TODO should this be used to specify the user in the jini service config files? -install.user=your_username - -# The group on all hosts that is able to read the scripts, write log files, etc. -# This can be 'users' when trying to share across the hosts when running bigdata -# using a normal user login. It can be 'wheel' when trying to share across hosts -# using a root login. -install.group=${install.user} -#install.group=wheel - -# The file permissions mask used for files that must be read/write for all hosts. 
-# -# Note: umask uses the following numbers for permissions: -# -# 0 - read, write and execute -# 1 - read and write -# 2 - read and execute -# 3 - read only -# 4 - write and execute -# 5 - write only -# 6 - execute only -# 7 - no permissions -# 117 := user and group have read-write access -# 177 := user has read-write access, group and other have none. -# -# @todo not used yet - perhaps will never be used. -# -#umask.shared=117 -#umask.local=177 - -# The bigdata subsystem lock file. The user MUST be able to read/write this file -# on each host. Therefore, if you are not installing as root this will need to be -# a file within the user's home directory or some directory which exists on each -# host and is writable by that user. ${Las}/lockFile is the recommended default. -#LOCK_FILE=/var/lock/subsys/bigdata -LOCK_FILE=${LAS}/lockFile - -# The main bigdata configuration file. -bigdata.config=${install.config.dir}/jini/bigdataCluster.config - -# The main jini configuration file. -jini.config=${install.config.dir}/jini/startAll.config - -# The policy file used to start clients and services. The default policy -# file is completely open. -policyFile=${install.config.dir}/policy/policy.all - -# The host that will run the log4j SimpleSocketLogger and the port on which -# the logger will listen. This gets written into the bigdata configuration -# file and the log4j.properties file such that the logger daemon will startup -# on this host and the clients and services will log onto a socket appender -# which logs onto this host. log4j.properties (the file used by the clients -# and services) is setup to log INFO+ onto this service. It will also log -# ERROR+ onto the local console in case the socket logger is down. The socket -# logger is setup in log4jServer.properties. It logs ERROR+ onto the errorLog -# (see below), INFO+ onto the detailLog (see below), and events onto the -# eventLog (see below). -# -# Note: java.util.logging messages DO NOT get written onto this logger -- only -# log4j messages. -# -LOG4J_SOCKET_LOGGER_HOST = localhost -LOG4J_SOCKET_LOGGER_PORT = 4445 - -# The socket logger uses a DailyRollingFileAppender by default and this -# specifies the DatePattern property which determines both when the file -# will be rolled over and the name of the rolled over log file. -# -# Note: You are responsible for pruning old log files! -# -# roll over at midnight. -LOG4J_DATE_PATTERN='.'yyyy-MM-dd'.log' - -# The log4j configuration file for the clients and services. This is used -# to set the log4j.configuration property. -# -# Note: This is a URL!!! -# -log4j.config=file:${install.config.dir}/logging/log4j.properties - -# The log4j configuration file for the SimpleSocketServer. -# -# Note: This is a FILE (not a URL) -# -log4jServer.config=${install.config.dir}/logging/log4jServer.properties - -# The java.util.logging configuration file. (Jini uses java.util.logging). -# -# Note: The java.util.logging system DOES NOT use the simple socket logger. -# You have to look at the console output or otherwise configure log message -# aggregation for java.util.logging separately. -# -logging.config=${install.config.dir}/logging/log4j.properties - -# Bigdata messages at ERROR or above are logged on this file. -errorLog=${install.log.dir}/error.log - -# Bigdata messages at INFO or above (or as configured) are logged on this file. -detailLog=${install.log.dir}/detail.log - -# Bigdata events are logged on this file. -eventLog=${install.log.dir}/event.log - -# Bigdata rule evaluation is logged on this file. 
-ruleLog=${install.log.dir}/rule.log - -# Messages from the bigdata script are written here when it is run by cron. This -# file must be writable by all hosts. -# -# @todo are concurrent appends on this file getting lost? -stateLog=${install.log.dir}/state.log - -# When cron or a similar process is used to periodically execute the 'bigdata' -# script, the script can be invoked with the name of this file and the value in -# the file will be interpreted as the goal state for the script. The value in -# the file is initially 'status'. It is changed to 'start' to bring up the -# bigdata federation. This file must be readable by all hosts. Writes may be -# restricted to a specific user. -stateFile=${NAS}/state - -# Boolean option. When true, 'bigdata stop' and 'bigdata destroy' will use -# 'killall -9 java' to provide a sure kill for ALL java processes on the host. -# Needless to say, this option does not play well with other java components -# running on the same host (at least, running as the same user on the same -# host). This value is written into bigdataenv as an environment variable -# named "FORCE_KILL_ALL" so you can change the behavior after the install. -forceKillAll=false - -# -# NTP setup (optional, not fully automated). -# -# These properties are substituted into the sample ntp-client.conf and ntp.conf -# files. Those files may be used as a guideline for setting up ntpd on your -# cluster, but please see the following resources. -# -# See http://www.cis.udel.edu/~mills/ntp/html/ntpdate.html -# -# See http://www.brennan.id.au/09-Network_Time_Protocol.html -# -# Once you have ntp configured, you can use 'bigdata ntpSet' to sync the machines -# to the ntp server and 'bigdata ntpStart' to restart the ntpd client on those -# machines once their clocks are synchronized (ntpd can not be running when you -# need to correct for a large clock error). -# -# Note: Most of bigdata does not rely on synchronized clocks. However, the -# performance counter reporting subsystem reports local timestamps. Therefore -# synchronized clocks can make it much easier to interpret the logged performance -# counters. The load balancer bases its decision on a subset of the performance -# counters, so if there is a large clock skew that can effect its decision making. - -# The ntpd server on your local network. The value should be the value reported -# by 'hostname'. If you are not using ntpd or if you are managing it yourself, -# then you do not need to set this property. -NTP_MASTER= - -# The local network address. -NTP_NETWORK=192.168.6.0 - -# The local network mask. -NTP_NETMASK=255.255.255.0 - -## -# Properties for the 'analysis' target. -## - -# For convenience - used by other properties in this file. -#analysis.dir=E:/DPP/cluster16/U100000b/run21/opt2/run21c -analysis.dir=E:/DPP/dpaether123/async-write-runs-june-09/run5/nas/runs/run5 - -# Either the file or the directory containing the logged performance counters -# to be extracted for analysis. -analysis.counters.dir=${analysis.dir}/counters - -# Either the file or the directory containing the queries to be executed. Each -# query is a URL formed just like the URLs used to query the live bigdata federation. -# The 'file=' URL query parameter should be added to write the output of the query -# on the named file. -analysis.queries=src/resources/analysis/queries -#analysis.queries=scratchQueries.txt - -# The directory where the extracted performance counters will be written. 
-analysis.out.dir=${analysis.dir}/output - -## -# Properties for the "install-lubm" target (optional). -## - -# Basic install directory (scripts will go into [install.bin.dir]). -install.lubm.dir=${NAS}/lubm -# Where to install the JAR. -install.lubm.lib.dir=${install.lubm.dir}/lib -# Where to install the ontology and configuration files -install.lubm.config.dir=${install.lubm.dir}/config - -# Note: but sure to choose a port that is not already in use by the -# load balancer, by the jini core services class server, etc. You -# MUST specify the same port in the java.rmi.server.codebase property -# (this is done automatically below). -LUBM_CLASS_SERVER_PORT = 8082 - -# The name of the host on which the class server is running. This must -# be the host on which you run the 'ant install' target since the class -# server is configured to serve up classes from the ant-build/classes -# directory. -LUBM_CLASS_SERVER_HOSTNAME = XXX - -# The java.rmi.server.codebase for lubmMaster.sh. You can use wget to verify -# that the class server is working (once you start it using classServer.sh). -# -# wget -o /dev/null --no-cache ${LUBM_RMI_CODEBASE_URL}edu/lehigh/swat/bench/ubt/bigdata/LubmGeneratorMaster.class -# -LUBM_RMI_CODEBASE_URL = http://${LUBM_CLASS_SERVER_HOSTNAME}:${LUBM_CLASS_SERVER_PORT}/ - -# The LUBM configuration files and the ontology can be found on the installed system. -LUBM_ONTOLOGY_DIR=$NAS/lubm - -# properties needed to install bigdata behind the OpenRDF Sesame HTTP Server -#sesame.server.dir = C:/server/apache-tomcat-6.0.24/webapps/openrdf-sesame -#workbench.server.dir = C:/server/apache-tomcat-6.0.24/webapps/openrdf-workbench -#aduna.data.dir = C:/Documents and Settings/mike/Application Data/Aduna/OpenRDF Sesame console -#sesame.dir = C:/dev/openrdf-sesame-2.3.0 -sesame.server.dir = D:/apache-tomcat-6.0.26/webapps/openrdf-sesame -workbench.server.dir = D:/apache-tomcat-6.0.26/webapps/openrdf-workbench -aduna.data.dir = C:/Documents and Settings/Bryan Thompson/Application Data/Aduna/OpenRDF Sesame console -sesame.dir = D:/openrdf-sesame-2.3.1 - -## -# Properties which affect the performance test runs. -# - -# The top-level directory above the various data files to be used. -perf.data.dir=/usr/bigdata/data - -# The directory in which the performance tests will be run. This directory needs to be -# on a volume with a lot of room. The directory may be destroyed (by the test harness) -# after the performance tests have run their course. -perf.run.dir=/usr/bigdata/runs \ No newline at end of file Deleted: branches/maven_scaleout/src/main/deploy/install.xml =================================================================== --- branches/maven_scaleout/src/main/deploy/install.xml 2010-09-01 16:29:16 UTC (rev 3484) +++ branches/maven_scaleout/src/main/deploy/install.xml 2010-09-01 16:59:34 UTC (rev 3485) @@ -1,220 +0,0 @@ -<project name="bigdata-install" default="ant-install" basedir="."> - - <!-- assumes that this script is being run from the uncorked deployment artifact, either in the build's target directory or on a test machine. 
--> - <property name="deploy.dir" location="${basedir}" /> - <property name="app.home" location="${deploy.dir}" /> - <property name="deploy.lib" location="${deploy.dir}/lib" /> - <property name="deploy.lib.dl" location="${deploy.dir}/lib-dl" /> - - <property name="log4j.configuration" location="${deploy.dir}/var/config/logging/log4j.properties" /> - <property name="java.net.preferIPv4Stack" value="true" /> - <property name="default.nic" value="eth0" /> - - <property file="${basedir}/install.properties" /> - - <target name="setup" > - <exec executable="hostname" outputproperty="this.hostname" /> - </target> - - - <target name="ant-install" depends="setup" description="Ant based install on a node."> - <mkdir dir="${NAS}" /> - <mkdir dir="${LAS}" /> - - <chmod perm="ug+rw,o-rw"> - <fileset dir="${NAS}" /> - </chmod> - - <chmod perm="ug+rw,o-rw"> - <fileset dir="${LAS}" /> - </chmod> - - <mkdir dir="${install.config.dir}" /> - <mkdir dir="${install.doc.dir}" /> - <mkdir dir="${install.lib.dir}" /> - <mkdir dir="${install.bin.dir}" /> - <mkdir dir="${install.log.dir}" /> - <mkdir dir="${install.dist.dir}" /> - - <copy toDir="${install.config.dir}"> - <fileset dir="${deploy.dir}/legacy/config" /> - </copy> - - <copy toDir="${install.config.dir}"> <!-- new config directory --> - <fileset dir="${deploy.dir}/var/config" /> - </copy> - - <copy toDir="${install.doc.dir}"> - <fileset dir="${deploy.dir}"> - <include name="LICENSE.txt" /> - <include name="overview.html" /> - <include name="README-JINI" /> - <include name="bigdata/LEGAL/*" /> - <include name="bigdata-jini/LEGAL/*" /> - <include name="bigdata-rdf/LEGAL/*" /> - <include name="bigdata-sails/LEGAL/*" /> - </fileset> - </copy> - - <copy toDir="${install.lib.dir}"> - <fileset dir="${deploy.dir}/lib" /> - </copy> - - <copy toDir="${install.lib-dl.dir}"> - <fileset dir="${deploy.dir}/lib-dl" /> - </copy> - - <path id="runtime.classpath"> - <fileset dir="${install.lib.dir}"> - <include name="**/*jar" /> - </fileset> - </path> - - <pathconvert property="runtime.classpath.text" refid="runtime.classpath" /> - - <copy toDir="${install.bin.dir}"> - <fileset dir="${deploy.dir}/legacy/scripts" /> - </copy> - - <!-- parameter substitution. 
--> - - <replace dir="${install.bin.dir}" summary="true"> - <replacefilter token="@FED@" value="${FED}" /> - <replacefilter token="@NAS@" value="${NAS}" /> - <replacefilter token="@LAS@" value="${LAS}" /> - <replacefilter token="@JAVA_HOME@" value="${JAVA_HOME}" /> - <replacefilter token="@JINI_CLASS_SERVER_PORT@" value="${JINI_CLASS_SERVER_PORT}" /> - <replacefilter token="@LOAD_BALANCER_PORT@" value="${LOAD_BALANCER_PORT}" /> - <replacefilter token="@SYSSTAT_HOME@" value="${SYSSTAT_HOME}" /> - <replacefilter token="@USE_NIO@" value="${USE_NIO}" /> - <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> - <replacefilter token="@LIB_DIR@" value="${install.lib.dir}" /> - <replacefilter token="@LOG_DIR@" value="${install.log.dir}" /> - <replacefilter token="@CONFIG_DIR@" value="${install.config.dir}" /> - <replacefilter token="@INSTALL_USER@" value="${install.user}" /> - <replacefilter token="@INSTALL_GROUP@" value="${install.group}" /> - <replacefilter token="@LOCK_FILE@" value="${LOCK_FILE}" /> - <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> - <replacefilter token="@JINI_CONFIG@" value="${jini.config}" /> - <replacefilter token="@POLICY_FILE@" value="${policyFile}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_PORT@" value="${LOG4J_SOCKET_LOGGER_PORT}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_CONFIG@" value="${log4jServer.config}" /> - <replacefilter token="@LOG4J_DATE_PATTERN@" value="${LOG4J_DATE_PATTERN}" /> - <replacefilter token="@LOG4J_CONFIG@" value="${log4j.config}" /> - <replacefilter token="@LOGGING_CONFIG@" value="${logging.config}" /> - <replacefilter token="@ERROR_LOG@" value="${errorLog}" /> - <replacefilter token="@DETAIL_LOG@" value="${detailLog}" /> - <replacefilter token="@EVENT_LOG@" value="${eventLog}" /> - <replacefilter token="@RULE_LOG@" value="${ruleLog}" /> - <replacefilter token="@STATE_LOG@" value="${stateLog}" /> - <replacefilter token="@STATE_FILE@" value="${stateFile}" /> - <replacefilter token="@FORCE_KILL_ALL@" value="${forceKillAll}" /> - <replacefilter token="@NTP_MASTER@" value="${NTP_MASTER}" /> - <replacefilter token="@NTP_NETWORK@" value="${NTP_NETWORK}" /> - <replacefilter token="@NTP_NETMASK@" value="${NTP_NETMASK}" /> - <replacefilter token="@CLASSPATH@" value="${runtime.classpath.text}" /> - <replacefilter token="@APP_HOME@" value="${APP_HOME}" /> - </replace> - - <replace dir="${install.config.dir}" summary="true"> - <replacefilter token="@FED@" value="${FED}" /> - <replacefilter token="@NAS@" value="${NAS}" /> - <replacefilter token="@LAS@" value="${LAS}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> - <replacefilter token="@JAVA_HOME@" value="${JAVA_HOME}" /> - <replacefilter token="@JINI_CLASS_SERVER_PORT@" value="${JINI_CLASS_SERVER_PORT}" /> - <replacefilter token="@LOAD_BALANCER_PORT@" value="${LOAD_BALANCER_PORT}" /> - <replacefilter token="@SYSSTAT_HOME@" value="${SYSSTAT_HOME}" /> - <replacefilter token="@USE_NIO@" value="${USE_NIO}" /> - <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> - <replacefilter token="@LIB_DIR@" value="${install.lib.dir}" /> - <replacefilter token="@LOG_DIR@" value="${install.log.dir}" /> - <replacefilter token="@CONFIG_DIR@" value="${install.config.dir}" /> - <replacefilter token="@INSTALL_USER@" value="${install.user}" /> - <replacefilter token="@INSTALL_GROUP@" value="${install.group}" /> - <replacefilter 
token="@LOCK_FILE@" value="${LOCK_FILE}" /> - <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> - <replacefilter token="@JINI_CONFIG@" value="${jini.config}" /> - <replacefilter token="@POLICY_FILE@" value="${policyFile}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_PORT@" value="${LOG4J_SOCKET_LOGGER_PORT}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_CONFIG@" value="${log4jServer.config}" /> - <replacefilter token="@LOG4J_DATE_PATTERN@" value="${LOG4J_DATE_PATTERN}" /> - <replacefilter token="@LOG4J_CONFIG@" value="${log4j.config}" /> - <replacefilter token="@LOGGING_CONFIG@" value="${logging.config}" /> - <replacefilter token="@ERROR_LOG@" value="${errorLog}" /> - <replacefilter token="@DETAIL_LOG@" value="${detailLog}" /> - <replacefilter token="@EVENT_LOG@" value="${eventLog}" /> - <replacefilter token="@RULE_LOG@" value="${ruleLog}" /> - <replacefilter token="@STATE_LOG@" value="${stateLog}" /> - <replacefilter token="@STATE_FILE@" value="${stateFile}" /> - <replacefilter token="@FORCE_KILL_ALL@" value="${forceKillAll}" /> - <replacefilter token="@NTP_MASTER@" value="${NTP_MASTER}" /> - <replacefilter token="@NTP_NETWORK@" value="${NTP_NETWORK}" /> - <replacefilter token="@NTP_NETMASK@" value="${NTP_NETMASK}" /> - <replacefilter token="@CLASSPATH@" value="${runtime.classpath.text}" /> - <replacefilter token="@APP_HOME@" value="${APP_HOME}" /> - <!-- updates the configuration file to locate the lubm ontology. --> - <replacefilter token="@install.lubm.config.dir@" value="${install.lubm.config.dir}" /> - </replace> - - <!-- fix newlines (otherwise substitutions cause things to break). --> - <fixcrlf srcDir="${install.config.dir}" /> - <fixcrlf srcDir="${install.bin.dir}" /> - - <!-- set execute bit for scripts in this directory (must be the last step). --> - <chmod perm="u+x,g+rx,o-rwx"> - <fileset dir="${install.bin.dir}"> - <exclude name="README" /> - <exclude name="POST-INSTALL" /> - </fileset> - </chmod> - - <!-- Setup the status file which will be read by the bigdata script and - the log on which that script will write its output. This is used - if cron, or a similar process, will execute the script on a periodic - basis. The initial state is always 'status'. The initial stateLog - is always empty. The state file must be readable by the group, but - could be restricted to write by a specific user. The stateLog must be - read/write for the group. --> - - <echo file="${stateFile}">status</echo> - <echo file="${stateLog}"> - </echo> - - <chmod perm="g+rw,o-rw" file="${stateFile}" /> - <chmod perm="g+rw,o-rw" file="${stateLog}" /> - - <!-- Make sure that the entire shared directory structure is read/write for the group. --> - <chmod perm="g+rwx" type="both" dir="${NAS}" verbose="true" /> - - <!-- Make sure that it is all accessible to the install group (ant 1.6+ plus extension module required). - <chown file="${NAS}" type="both" owner="${install.user}.${install.group}" verbose="true" /> - --> - - <!-- Works for earlier versions of ant LT 1.6 which do not bundle "chown". --> - <apply executable="chown" description="set owner on NAS files" os="Linux"> - <arg value="-R" /> - <arg value="${install.user}.${install.group}" /> - <dirset dir="${NAS}" /> - </apply> - - <!-- @todo check the installed configuration file (after parameter substitution). --> - <!-- @todo also check the installed jini configuration files. 
--> - <java classname="com.bigdata.jini.util.CheckConfiguration" - failonerror="true" fork="true" logerror="true"> - <classpath refid="runtime.classpath" /> - <arg value="${bigdata.config}" /> - </java> - - <loadfile property="postInstallMessage" srcFile="${install.bin.dir}/POST-INSTALL" /> - - <echo> - - ${postInstallMessage}</echo> - - </target> - - -</project> Copied: branches/maven_scaleout/src/main/deploy/legacy/install.properties (from rev 3464, branches/maven_scaleout/src/main/deploy/install.properties) =================================================================== --- branches/maven_scaleout/src/main/deploy/legacy/install.properties (rev 0) +++ branches/maven_scaleout/src/main/deploy/legacy/install.properties 2010-09-01 16:59:34 UTC (rev 3485) @@ -0,0 +1,323 @@ +# Bigdata ant build properties. +# +# $Id$ + +## +# Properties for installing bigdata. Many of these properties are both by the +# 'ant install' target and also wind up substituted into the 'bigdataenv' script, +# the main bigdata 'configuration' file, and the logger configuration files. +## + +# The name of the bigdata federation instance. +# This field should be globally unique, to prevent multiple clusters from finding and talking to one another. +FED=your_cluster_name + +# Bigdata-specific directory on a shared volume accessible by all hosts in the +# cluster. +# +# Note: You can create the appropriate permissions by creating the directory +# ahead of time and doing chown to set the user and group and then chmod to give +# the group read/write permissions. +NAS=/var/bigdata/nas + +# Bigdata-specific directory on a local volume. Each host in the cluster will +# place the persistent state for the bigdata services running on that host within +# this directory. The user which will execute bigdata MUST be able to read/write +# files on this path on each host. Therefore, if you are not installing as root +# this will need to be a file within the user's home directory or some directory +# which exists on each host and is writable by that user. +LAS=/var/bigdata/las +#LAS=~/bigdata/${FED} + +# The location of the installed JDK that will be used to build / run bigdata. +# +# Note: For Sun JVMs, 6u14 with compressed pointers is recommended for 64-bit +# machines having no more than 32G of RAM. Also, note that I have see core +# dumps with Sun jdk1.6.0_07 and FC6 on a 32-bit platform. +# +#JAVA_HOME=C:\\Program Files\\Java\\jdk1.6.0_10 +JAVA_HOME=/usr/java/default +#JAVA_HOME=/usr/lib/jvm/java-1.6.0-openjdk-1.6.0.0.x86_64 +#JAVA_HOME=/usr/java/jrockit-R27.3.0-jdk1.6.0_01 + +# The port on which the jini class server will be running. This class server +# is started on whichever host(s) are configured to run jini. It is part of +# the set of jini core services which includes reggie, etc. It is NOT setup +# to expose any classes except those found in the JARs of the jini/lib-dl +# directory. +JINI_CLASS_SERVER_PORT=9091 + +# The port on which the load balancer expose an httpd service which makes +# available the aggregated performance counters, events, and index dumps. +LOAD_BALANCER_PORT=9090 + +# Where the sysstat utilities are found (performance counter reporting for un*x). +#SYSSTAT_HOME=/usr/local/bin +SYSSTAT_HOME=/usr/bin + +# Specifies the value of com.sun.jini.jeri.tcp.useNIO. When true, use NIO for RMI. +USE_NIO=false + +# Where to install the scripts (must by readable by all hosts). +install.bin.dir=${NAS}/bin + +# Where to install the documentation. 
+install.doc.dir=${NAS}/doc + +# Where to install the JARs (must be readable by all hosts). +install.lib.dir=${NAS}/lib +install.lib-dl.dir=${NAS}/lib-dl + +# Where to install the configuration files (must be readable by all hosts). +install.config.dir=${NAS}/var/config + +# Where to write the log files. +install.log.dir=${NAS}/log + +# Where to write the deployment distribution. +install.dist.dir=${NAS}/dist + +# For appHome system property to set on ServicesManagerService VM +APP_HOME=${NAS} + +# The 'install-as' user (defaults to the user running the installer). +# +# @TODO ant lacks sufficient mechanisms to set the user (chown). +# +# @TODO should this be used to specify the user in the jini service config files? +install.user=your_username + +# The group on all hosts that is able to read the scripts, write log files, etc. +# This can be 'users' when trying to share across the hosts when running bigdata +# using a normal user login. It can be 'wheel' when trying to share across hosts +# using a root login. +install.group=${install.user} +#install.group=wheel + +# The file permissions mask used for files that must be read/write for all hosts. +# +# Note: umask uses the following numbers for permissions: +# +# 0 - read, write and execute +# 1 - read and write +# 2 - read and execute +# 3 - read only +# 4 - write and execute +# 5 - write only +# 6 - execute only +# 7 - no permissions +# 117 := user and group have read-write access +# 177 := user has read-write access, group and other have none. +# +# @todo not used yet - perhaps will never be used. +# +#umask.shared=117 +#umask.local=177 + +# The bigdata subsystem lock file. The user MUST be able to read/write this file +# on each host. Therefore, if you are not installing as root this will need to be +# a file within the user's home directory or some directory which exists on each +# host and is writable by that user. ${Las}/lockFile is the recommended default. +#LOCK_FILE=/var/lock/subsys/bigdata +LOCK_FILE=${LAS}/lockFile + +# The main bigdata configuration file. +bigdata.config=${install.config.dir}/jini/bigdataCluster.config + +# The main jini configuration file. +jini.config=${install.config.dir}/jini/startAll.config + +# The policy file used to start clients and services. The default policy +# file is completely open. +policyFile=${install.config.dir}/policy/policy.all + +# The host that will run the log4j SimpleSocketLogger and the port on which +# the logger will listen. This gets written into the bigdata configuration +# file and the log4j.properties file such that the logger daemon will startup +# on this host and the clients and services will log onto a socket appender +# which logs onto this host. log4j.properties (the file used by the clients +# and services) is setup to log INFO+ onto this service. It will also log +# ERROR+ onto the local console in case the socket logger is down. The socket +# logger is setup in log4jServer.properties. It logs ERROR+ onto the errorLog +# (see below), INFO+ onto the detailLog (see below), and events onto the +# eventLog (see below). +# +# Note: java.util.logging messages DO NOT get written onto this logger -- only +# log4j messages. +# +LOG4J_SOCKET_LOGGER_HOST = localhost +LOG4J_SOCKET_LOGGER_PORT = 4445 + +# The socket logger uses a DailyRollingFileAppender by default and this +# specifies the DatePattern property which determines both when the file +# will be rolled over and the name of the rolled over log file. +# +# Note: You are responsible for pruning old log files! 
+# +# roll over at midnight. +LOG4J_DATE_PATTERN='.'yyyy-MM-dd'.log' + +# The log4j configuration file for the clients and services. This is used +# to set the log4j.configuration property. +# +# Note: This is a URL!!! +# +log4j.config=file:${install.config.dir}/logging/log4j.properties + +# The log4j configuration file for the SimpleSocketServer. +# +# Note: This is a FILE (not a URL) +# +log4jServer.config=${install.config.dir}/logging/log4jServer.properties + +# The java.util.logging configuration file. (Jini uses java.util.logging). +# +# Note: The java.util.logging system DOES NOT use the simple socket logger. +# You have to look at the console output or otherwise configure log message +# aggregation for java.util.logging separately. +# +logging.config=${install.config.dir}/logging/log4j.properties + +# Bigdata messages at ERROR or above are logged on this file. +errorLog=${install.log.dir}/error.log + +# Bigdata messages at INFO or above (or as configured) are logged on this file. +detailLog=${install.log.dir}/detail.log + +# Bigdata events are logged on this file. +eventLog=${install.log.dir}/event.log + +# Bigdata rule evaluation is logged on this file. +ruleLog=${install.log.dir}/rule.log + +# Messages from the bigdata script are written here when it is run by cron. This +# file must be writable by all hosts. +# +# @todo are concurrent appends on this file getting lost? +stateLog=${install.log.dir}/state.log + +# When cron or a similar process is used to periodically execute the 'bigdata' +# script, the script can be invoked with the name of this file and the value in +# the file will be interpreted as the goal state for the script. The value in +# the file is initially 'status'. It is changed to 'start' to bring up the +# bigdata federation. This file must be readable by all hosts. Writes may be +# restricted to a specific user. +stateFile=${NAS}/state + +# Boolean option. When true, 'bigdata stop' and 'bigdata destroy' will use +# 'killall -9 java' to provide a sure kill for ALL java processes on the host. +# Needless to say, this option does not play well with other java components +# running on the same host (at least, running as the same user on the same +# host). This value is written into bigdataenv as an environment variable +# named "FORCE_KILL_ALL" so you can change the behavior after the install. +forceKillAll=false + +# +# NTP setup (optional, not fully automated). +# +# These properties are substituted into the sample ntp-client.conf and ntp.conf +# files. Those files may be used as a guideline for setting up ntpd on your +# cluster, but please see the following resources. +# +# See http://www.cis.udel.edu/~mills/ntp/html/ntpdate.html +# +# See http://www.brennan.id.au/09-Network_Time_Protocol.html +# +# Once you have ntp configured, you can use 'bigdata ntpSet' to sync the machines +# to the ntp server and 'bigdata ntpStart' to restart the ntpd client on those +# machines once their clocks are synchronized (ntpd can not be running when you +# need to correct for a large clock error). +# +# Note: Most of bigdata does not rely on synchronized clocks. However, the +# performance counter reporting subsystem reports local timestamps. Therefore +# synchronized clocks can make it much easier to interpret the logged performance +# counters. The load balancer bases its decision on a subset of the performance +# counters, so if there is a large clock skew that can effect its decision making. + +# The ntpd server on your local network. 
The value should be the value reported +# by 'hostname'. If you are not using ntpd or if you are managing it yourself, +# then you do not need to set this property. +NTP_MASTER= + +# The local network address. +NTP_NETWORK=192.168.6.0 + +# The local network mask. +NTP_NETMASK=255.255.255.0 + +## +# Properties for the 'analysis' target. +## + +# For convenience - used by other properties in this file. +#analysis.dir=E:/DPP/cluster16/U100000b/run21/opt2/run21c +analysis.dir=E:/DPP/dpaether123/async-write-runs-june-09/run5/nas/runs/run5 + +# Either the file or the directory containing the logged performance counters +# to be extracted for analysis. +analysis.counters.dir=${analysis.dir}/counters + +# Either the file or the directory containing the queries to be executed. Each +# query is a URL formed just like the URLs used to query the live bigdata federation. +# The 'file=' URL query parameter should be added to write the output of the query +# on the named file. +analysis.queries=src/resources/analysis/queries +#analysis.queries=scratchQueries.txt + +# The directory where the extracted performance counters will be written. +analysis.out.dir=${analysis.dir}/output + +## +# Properties for the "install-lubm" target (optional). +## + +# Basic install directory (scripts will go into [install.bin.dir]). +install.lubm.dir=${NAS}/lubm +# Where to install the JAR. +install.lubm.lib.dir=${install.lubm.dir}/lib +# Where to install the ontology and configuration files +install.lubm.config.dir=${install.lubm.dir}/config + +# Note: but sure to choose a port that is not already in use by the +# load balancer, by the jini core services class server, etc. You +# MUST specify the same port in the java.rmi.server.codebase property +# (this is done automatically below). +LUBM_CLASS_SERVER_PORT = 8082 + +# The name of the host on which the class server is running. This must +# be the host on which you run the 'ant install' target since the class +# server is configured to serve up classes from the ant-build/classes +# directory. +LUBM_CLASS_SERVER_HOSTNAME = XXX + +# The java.rmi.server.codebase for lubmMaster.sh. You can use wget to verify +# that the class server is working (once you start it using classServer.sh). +# +# wget -o /dev/null --no-cache ${LUBM_RMI_CODEBASE_URL}edu/lehigh/swat/bench/ubt/bigdata/LubmGeneratorMaster.class +# +LUBM_RMI_CODEBASE_URL = http://${LUBM_CLASS_SERVER_HOSTNAME}:${LUBM_CLASS_SERVER_PORT}/ + +# The LUBM configuration files and the ontology can be found on the installed system. +LUBM_ONTOLOGY_DIR=$NAS/lubm + +# properties needed to install bigdata behind the OpenRDF Sesame HTTP Server +#sesame.server.dir = C:/server/apache-tomcat-6.0.24/webapps/openrdf-sesame +#workbench.server.dir = C:/server/apache-tomcat-6.0.24/webapps/openrdf-workbench +#aduna.data.dir = C:/Documents and Settings/mike/Application Data/Aduna/OpenRDF Sesame console +#sesame.dir = C:/dev/openrdf-sesame-2.3.0 +sesame.server.dir = D:/apache-tomcat-6.0.26/webapps/openrdf-sesame +workbench.server.dir = D:/apache-tomcat-6.0.26/webapps/openrdf-workbench +aduna.data.dir = C:/Documents and Settings/Bryan Thompson/Application Data/Aduna/OpenRDF Sesame console +sesame.dir = D:/openrdf-sesame-2.3.1 + +## +# Properties which affect the performance test runs. +# + +# The top-level directory above the various data files to be used. +perf.data.dir=/usr/bigdata/data + +# The directory in which the performance tests will be run. This directory needs to be +# on a volume with a lot of room. 
The directory may be destroyed (by the test harness) +# after the performance tests have run their course. +perf.run.dir=/usr/bigdata/runs \ No newline at end of file Property changes on: branches/maven_scaleout/src/main/deploy/legacy/install.properties ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Copied: branches/maven_scaleout/src/main/deploy/legacy/install.xml (from rev 3464, branches/maven_scaleout/src/main/deploy/install.xml) =================================================================== --- branches/maven_scaleout/src/main/deploy/legacy/install.xml (rev 0) +++ branches/maven_scaleout/src/main/deploy/legacy/install.xml 2010-09-01 16:59:34 UTC (rev 3485) @@ -0,0 +1,220 @@ +<project name="bigdata-install" default="ant-install" basedir="."> + + <!-- assumes that this script is being run from the uncorked deployment artifact, either in the build's target directory or on a test machine. --> + <property name="deploy.dir" location="${basedir}/.." /> + <property name="app.home" location="${deploy.dir}" /> + <property name="deploy.lib" location="${deploy.dir}/lib" /> + <property name="deploy.lib.dl" location="${deploy.dir}/lib-dl" /> + + <property name="log4j.configuration" location="${deploy.dir}/var/config/logging/log4j.properties" /> + <property name="java.net.preferIPv4Stack" value="true" /> + <property name="default.nic" value="eth0" /> + + <property file="install.properties" /> + + <target name="setup" > + <exec executable="hostname" outputproperty="this.hostname" /> + </target> + + + <target name="ant-install" depends="setup" description="Ant based install on a node."> + <mkdir dir="${NAS}" /> + <mkdir dir="${LAS}" /> + + <chmod perm="ug+rw,o-rw"> + <fileset dir="${NAS}" /> + </chmod> + + <chmod perm="ug+rw,o-rw"> + <fileset dir="${LAS}" /> + </chmod> + + <mkdir dir="${install.config.dir}" /> + <mkdir dir="${install.doc.dir}" /> + <mkdir dir="${install.lib.dir}" /> + <mkdir dir="${install.bin.dir}" /> + <mkdir dir="${install.log.dir}" /> + <mkdir dir="${install.dist.dir}" /> + + <copy toDir="${install.config.dir}"> + <fileset dir="${deploy.dir}/legacy/config" /> + </copy> + + <copy toDir="${install.config.dir}"> <!-- new config directory --> + <fileset dir="${deploy.dir}/var/config" /> + </copy> + + <copy toDir="${install.doc.dir}"> + <fileset dir="${deploy.dir}"> + <include name="LICENSE.txt" /> + <include name="overview.html" /> + <include name="README-JINI" /> + <include name="bigdata/LEGAL/*" /> + <include name="bigdata-jini/LEGAL/*" /> + <include name="bigdata-rdf/LEGAL/*" /> + <include name="bigdata-sails/LEGAL/*" /> + </fileset> + </copy> + + <copy toDir="${install.lib.dir}"> + <fileset dir="${deploy.dir}/lib" /> + </copy> + + <copy toDir="${install.lib-dl.dir}"> + <fileset dir="${deploy.dir}/lib-dl" /> + </copy> + + <path id="runtime.classpath"> + <fileset dir="${install.lib.dir}"> + <include name="**/*jar" /> + </fileset> + </path> + + <pathconvert property="runtime.classpath.text" refid="runtime.classpath" /> + + <copy toDir="${install.bin.dir}"> + <fileset dir="${deploy.dir}/legacy/scripts" /> + </copy> + + <!-- parameter substitution. 
--> + + <replace dir="${install.bin.dir}" summary="true"> + <replacefilter token="@FED@" value="${FED}" /> + <replacefilter token="@NAS@" value="${NAS}" /> + <replacefilter token="@LAS@" value="${LAS}" /> + <replacefilter token="@JAVA_HOME@" value="${JAVA_HOME}" /> + <replacefilter token="@JINI_CLASS_SERVER_PORT@" value="${JINI_CLASS_SERVER_PORT}" /> + <replacefilter token="@LOAD_BALANCER_PORT@" value="${LOAD_BALANCER_PORT}" /> + <replacefilter token="@SYSSTAT_HOME@" value="${SYSSTAT_HOME}" /> + <replacefilter token="@USE_NIO@" value="${USE_NIO}" /> + <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> + <replacefilter token="@LIB_DIR@" value="${install.lib.dir}" /> + <replacefilter token="@LOG_DIR@" value="${install.log.dir}" /> + <replacefilter token="@CONFIG_DIR@" value="${install.config.dir}" /> + <replacefilter token="@INSTALL_USER@" value="${install.user}" /> + <replacefilter token="@INSTALL_GROUP@" value="${install.group}" /> + <replacefilter token="@LOCK_FILE@" value="${LOCK_FILE}" /> + <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> + <replacefilter token="@JINI_CONFIG@" value="${jini.config}" /> + <replacefilter token="@POLICY_FILE@" value="${policyFile}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_PORT@" value="${LOG4J_SOCKET_LOGGER_PORT}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_CONFIG@" value="${log4jServer.config}" /> + <replacefilter token="@LOG4J_DATE_PATTERN@" value="${LOG4J_DATE_PATTERN}" /> + <replacefilter token="@LOG4J_CONFIG@" value="${log4j.config}" /> + <replacefilter token="@LOGGING_CONFIG@" value="${logging.config}" /> + <replacefilter token="@ERROR_LOG@" value="${errorLog}" /> + <replacefilter token="@DETAIL_LOG@" value="${detailLog}" /> + <replacefilter token="@EVENT_LOG@" value="${eventLog}" /> + <replacefilter token="@RULE_LOG@" value="${ruleLog}" /> + <replacefilter token="@STATE_LOG@" value="${stateLog}" /> + <replacefilter token="@STATE_FILE@" value="${stateFile}" /> + <replacefilter token="@FORCE_KILL_ALL@" value="${forceKillAll}" /> + <replacefilter token="@NTP_MASTER@" value="${NTP_MASTER}" /> + <replacefilter token="@NTP_NETWORK@" value="${NTP_NETWORK}" /> + <replacefilter token="@NTP_NETMASK@" value="${NTP_NETMASK}" /> + <replacefilter token="@CLASSPATH@" value="${runtime.classpath.text}" /> + <replacefilter token="@APP_HOME@" value="${APP_HOME}" /> + </replace> + + <replace dir="${install.config.dir}" summary="true"> + <replacefilter token="@FED@" value="${FED}" /> + <replacefilter token="@NAS@" value="${NAS}" /> + <replacefilter token="@LAS@" value="${LAS}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> + <replacefilter token="@JAVA_HOME@" value="${JAVA_HOME}" /> + <replacefilter token="@JINI_CLASS_SERVER_PORT@" value="${JINI_CLASS_SERVER_PORT}" /> + <replacefilter token="@LOAD_BALANCER_PORT@" value="${LOAD_BALANCER_PORT}" /> + <replacefilter token="@SYSSTAT_HOME@" value="${SYSSTAT_HOME}" /> + <replacefilter token="@USE_NIO@" value="${USE_NIO}" /> + <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> + <replacefilter token="@LIB_DIR@" value="${install.lib.dir}" /> + <replacefilter token="@LOG_DIR@" value="${install.log.dir}" /> + <replacefilter token="@CONFIG_DIR@" value="${install.config.dir}" /> + <replacefilter token="@INSTALL_USER@" value="${install.user}" /> + <replacefilter token="@INSTALL_GROUP@" value="${install.group}" /> + <replacefilter 
token="@LOCK_FILE@" value="${LOCK_FILE}" /> + <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> + <replacefilter token="@JINI_CONFIG@" value="${jini.config}" /> + <replacefilter token="@POLICY_FILE@" value="${policyFile}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_PORT@" value="${LOG4J_SOCKET_LOGGER_PORT}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_CONFIG@" value="${log4jServer.config}" /> + <replacefilter token="@LOG4J_DATE_PATTERN@" value="${LOG4J_DATE_PATTERN}" /> + <replacefilter token="@LOG4J_CONFIG@" value="${log4j.config}" /> + <replacefilter token="@LOGGING_CONFIG@" value="${logging.config}" /> + <replacefilter token="@ERROR_LOG@" value="${errorLog}" /> + <replacefilter token="@DETAIL_LOG@" value="${detailLog}" /> + <replacefilter token="@EVENT_LOG@" value="${eventLog}" /> + <replacefilter token="@RULE_LOG@" value="${ruleLog}" /> + <replacefilter token="@STATE_LOG@" value="${stateLog}" /> + <replacefilter token="@STATE_FILE@" value="${stateFile}" /> + <replacefilter token="@FORCE_KILL_ALL@" value="${forceKillAll}" /> + <replacefilter token="@NTP_MASTER@" value="${NTP_MASTER}" /> + <replacefilter token="@NTP_NETWORK@" value="${NTP_NETWORK}" /> + <replacefilter token="@NTP_NETMASK@" value="${NTP_NETMASK}" /> + <replacefilter token="@CLASSPATH@" value="${runtime.classpath.text}" /> + <replacefilter token="@APP_HOME@" value="${APP_HOME}" /> + <!-- updates the configuration file to locate the lubm ontology. --> + <replacefilter token="@install.lubm.config.dir@" value="${install.lubm.config.dir}" /> + </replace> + + <!-- fix newlines (otherwise substitutions cause things to break). --> + <fixcrlf srcDir="${install.config.dir}" /> + <fixcrlf srcDir="${install.bin.dir}" /> + + <!-- set execute bit for scripts in this directory (must be the last step). --> + <chmod perm="u+x,g+rx,o-rwx"> + <fileset dir="${install.bin.dir}"> + <exclude name="README" /> + <exclude name="POST-INSTALL" /> + </fileset> + </chmod> + + <!-- Setup the status file which will be read by the bigdata script and + the log on which that script will write its output. This is used + if cron, or a similar process, will execute the script on a periodic + basis. The initial state is always 'status'. The initial stateLog + is always empty. The state file must be readable by the group, but + could be restricted to write by a specific user. The stateLog must be + read/write for the group. --> + + <echo file="${stateFile}">status</echo> + <echo file="${st... [truncated message content] |
From: <tho...@us...> - 2010-09-01 16:29:22
Revision: 3484 http://bigdata.svn.sourceforge.net/bigdata/?rev=3484&view=rev Author: thompsonbry Date: 2010-09-01 16:29:16 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Corrected compile time error introduced into the lubm Test.java class when it was modified to use NicUtil. Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java 2010-09-01 16:29:07 UTC (rev 3483) +++ branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java 2010-09-01 16:29:16 UTC (rev 3484) @@ -85,7 +85,7 @@ hostname = NicUtil.getIpAddress("default.nic", "default", false); } catch(Throwable t) {//for now, maintain same failure logic as used previously t.printStackTrace(); - s = NicUtil.getIpAddressByLocalHost(); + hostname = NicUtil.getIpAddressByLocalHost(); } QUERY_TEST_RESULT_FILE = hostname + "-result.txt"; } else {
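The compile error fixed here came from assigning the fallback address to a variable (s) apparently left over from the pre-NicUtil code; the fallback must target the same hostname variable as the primary lookup. A sketch of the corrected pattern follows; the import path for NicUtil is an assumption based on the bigdata source layout, while the method signatures are taken from the diff itself:

import com.bigdata.util.config.NicUtil;

public class HostnameLookupSketch {
    static String resolveHostname() {
        String hostname;
        try {
            // Prefer the address reported for the configured NIC.
            hostname = NicUtil.getIpAddress("default.nic", "default", false);
        } catch (Throwable t) {
            // Maintain the same failure logic as before: report the
            // problem and fall back to the local host's address.
            t.printStackTrace();
            hostname = NicUtil.getIpAddressByLocalHost();
        }
        return hostname;
    }
}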
From: <tho...@us...> - 2010-09-01 16:29:13
Revision: 3483 http://bigdata.svn.sourceforge.net/bigdata/?rev=3483&view=rev Author: thompsonbry Date: 2010-09-01 16:29:07 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Removed the use of the ShadowJournal from ProgramTask. It will be integrated into AbstractTask instead. Removed if(true) from Node. Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/Node.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/Node.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/Node.java 2010-09-01 16:28:00 UTC (rev 3482) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/Node.java 2010-09-01 16:29:07 UTC (rev 3483) @@ -867,7 +867,7 @@ btree.storeCache.remove(oldChildAddr); } // free the oldChildAddr if the Strategy supports it - if (true) btree.store.delete(oldChildAddr); + btree.store.delete(oldChildAddr); // System.out.println("Deleting " + oldChildAddr); // Stash reference to the new child. Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java 2010-09-01 16:28:00 UTC (rev 3482) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java 2010-09-01 16:29:07 UTC (rev 3483) @@ -37,16 +37,13 @@ import org.apache.log4j.Logger; -import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.AbstractTask; import com.bigdata.journal.BufferMode; import com.bigdata.journal.ConcurrencyManager; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.IIndexStore; -import com.bigdata.journal.IJournal; import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; -import com.bigdata.journal.JournalShadow; import com.bigdata.journal.TimestampUtility; import com.bigdata.relation.IMutableRelation; import com.bigdata.relation.accesspath.ChunkConsumerIterator; @@ -506,11 +503,6 @@ */ tx = jnl.newTx(lastCommitTime); - /* - * Create the shadow journal to define the allocation context - */ - indexManager = new JournalShadow(jnl); - // the timestamp that we will read on for this step. joinNexusFactory.setReadTimestamp(TimestampUtility .asHistoricalRead(lastCommitTime));
From: <fko...@us...> - 2010-09-01 16:28:06
Revision: 3482 http://bigdata.svn.sourceforge.net/bigdata/?rev=3482&view=rev Author: fkoliver Date: 2010-09-01 16:28:00 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Convert ontology reference from File to URL to match previous change. Modified Paths: -------------- branches/maven_scaleout/src/test/deploy/testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config Modified: branches/maven_scaleout/src/test/deploy/testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config =================================================================== --- branches/maven_scaleout/src/test/deploy/testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config 2010-09-01 16:24:23 UTC (rev 3481) +++ branches/maven_scaleout/src/test/deploy/testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config 2010-09-01 16:28:00 UTC (rev 3482) @@ -32,7 +32,7 @@ ); // The ontology to load (file or directory) when the KB is created. - ontology = new File(lubmDir,"univ-bench.owl"); + ontology = new File(lubmDir,"univ-bench.owl").toURI().toURL(); // The maximum thread pool size for RDF parser tasks. //parserPoolSize = 5;
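Going through toURI() first matters here: File.toURL() is deprecated because it does not escape characters that are illegal in URLs (spaces in particular), while toURI().toURL() produces a properly encoded file: URL. A standalone sketch of the conversion used above; the directory path is hypothetical:

import java.io.File;
import java.net.URL;

public class FileToUrlSketch {
    public static void main(String[] args) throws Exception {
        File lubmDir = new File("/var/bigdata/lubm"); // hypothetical path
        // Same conversion as the config change above: encode via the URI.
        URL ontology = new File(lubmDir, "univ-bench.owl").toURI().toURL();
        System.out.println(ontology); // file:/var/bigdata/lubm/univ-bench.owl
    }
}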
From: <fko...@us...> - 2010-09-01 16:24:34
Revision: 3481 http://bigdata.svn.sourceforge.net/bigdata/?rev=3481&view=rev Author: fkoliver Date: 2010-09-01 16:24:23 +0000 (Wed, 01 Sep 2010) Log Message: ----------- 1) Modify bulk loading mechanism to pass URL objects around instead of File objects. 2) Provide URLListScanner class (as replacement for FileSystemScanner) which allows the user to provide a list of URLs in the cluster config for the bulk loader. 3) Provide FileServer class which starts a web server for a given directory on a given port, which can be used directly from the cluster configuration file. 4) Provide FileSystemScannerServer class (as replacement for FileSystemScanner) which scans a directory (as before) but uses a provided web server to serve up the files, passing URL objects for that web server to client services. 5) Remove the IRemoteExecutor interface (and its use of Callable) in favor of IClientService and IDataService (and IClientServiceCallable and IDataServiceCallable). This allows tasks to have start*Task methods which take container-specific arguments rather than relying on call() and mutating the tasks after deserialization. 6) If the bulk load fails with an exception, at least print a stack trace. 7) If the bulk load fails to load the ontology (for a NEW tuple store), then delete the newly created store. Modified Paths: -------------- branches/maven_scaleout/src/main/java/com/bigdata/journal/ConcurrencyManager.java branches/maven_scaleout/src/main/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java branches/maven_scaleout/src/main/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java branches/maven_scaleout/src/main/java/com/bigdata/rdf/rules/RDFJoinNexus.java branches/maven_scaleout/src/main/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/maven_scaleout/src/main/java/com/bigdata/rdf/store/DataLoader.java branches/maven_scaleout/src/main/java/com/bigdata/relation/locator/DefaultResourceLocator.java branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/AbstractStepTask.java branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/ProgramTask.java branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/pipeline/JoinTaskFactoryTask.java branches/maven_scaleout/src/main/java/com/bigdata/resources/MoveTask.java branches/maven_scaleout/src/main/java/com/bigdata/service/ClientService.java branches/maven_scaleout/src/main/java/com/bigdata/service/DataService.java branches/maven_scaleout/src/main/java/com/bigdata/service/IClientService.java branches/maven_scaleout/src/main/java/com/bigdata/service/IDataService.java branches/maven_scaleout/src/main/java/com/bigdata/service/IDataServiceCallable.java branches/maven_scaleout/src/main/java/com/bigdata/service/ListIndicesTask.java branches/maven_scaleout/src/main/java/com/bigdata/service/MetadataService.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/ClientServer.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/DataServer.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/MetadataServer.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/benchmark/ThroughputMaster.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/AbstractAsynchronousClientTask.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/AbstractClientTask.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/AbstractResourceScanner.java
branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/DiscoverServices.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/IAsynchronousClientTask.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/IResourceScannerFactory.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/MappedTaskMaster.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/ResourceBufferTask.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/ServiceMap.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/TaskMaster.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/util/DumpFederation.java branches/maven_scaleout/src/main/java/com/bigdata/service/ndx/pipeline/AbstractPendingSetSubtask.java branches/maven_scaleout/src/test/java/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java branches/maven_scaleout/src/test/java/com/bigdata/resources/AbstractResourceManagerTestCase.java branches/maven_scaleout/src/test/java/com/bigdata/service/jini/PerformanceTest.java Added Paths: ----------- branches/maven_scaleout/src/main/java/com/bigdata/service/IClientServiceCallable.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/FileServer.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/FileSystemScannerServer.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/URLListScanner.java Removed Paths: ------------- branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/EmptyProgramTask.java branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/IProgramTask.java branches/maven_scaleout/src/main/java/com/bigdata/service/DataServiceCallable.java branches/maven_scaleout/src/main/java/com/bigdata/service/FederationCallable.java branches/maven_scaleout/src/main/java/com/bigdata/service/IFederationCallable.java branches/maven_scaleout/src/main/java/com/bigdata/service/IRemoteExecutor.java branches/maven_scaleout/src/main/java/com/bigdata/service/jini/master/AggregatorTask.java Modified: branches/maven_scaleout/src/main/java/com/bigdata/journal/ConcurrencyManager.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/journal/ConcurrencyManager.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/journal/ConcurrencyManager.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -90,17 +90,17 @@ */ public class ConcurrencyManager implements IConcurrencyManager { - final protected static Logger log = Logger.getLogger(ConcurrencyManager.class); + final private static Logger log = Logger.getLogger(ConcurrencyManager.class); // /** // * True iff the {@link #log} level is INFO or less. // */ -// final protected static boolean INFO = log.isInfoEnabled(); +// final private static boolean INFO = log.isInfoEnabled(); /** * True iff the {@link #log} level is DEBUG or less. */ - final protected static boolean DEBUG = log.isDebugEnabled(); + final private static boolean DEBUG = log.isDebugEnabled(); /** * Options for the {@link ConcurrentManager}. @@ -340,7 +340,7 @@ * Once the transaction has acquired those writable indices it then runs its * commit phrase as an unisolated operation on the {@link #writeService}. 
*/ - final protected ThreadPoolExecutor txWriteService; + final private ThreadPoolExecutor txWriteService; /** * Pool of threads for handling concurrent unisolated read operations on @@ -358,7 +358,7 @@ * historical commit records (which may span more than one logical * journal) until the reader terminates. */ - final protected ThreadPoolExecutor readService; + final private ThreadPoolExecutor readService; /** * Pool of threads for handling concurrent unisolated write operations on @@ -371,6 +371,7 @@ * Serialization of access to unisolated named indices is acomplished by * gaining an exclusive lock on the unisolated named index. */ + // protected for access by tests. final protected WriteExecutorService writeService; /** @@ -400,7 +401,7 @@ } - protected void assertOpen() { + private void assertOpen() { if (!open) throw new IllegalStateException(); @@ -466,7 +467,7 @@ * Long.MAX_VALUE. */ - final long shutdownTimeout = this.shutdownTimeout == 0L ? Long.MAX_VALUE + final long tmpShutdownTimeout = this.shutdownTimeout == 0L ? Long.MAX_VALUE : this.shutdownTimeout; final TimeUnit unit = TimeUnit.MILLISECONDS; @@ -486,7 +487,7 @@ final long elapsed = System.currentTimeMillis() - begin; - if(!txWriteService.awaitTermination(shutdownTimeout-elapsed, unit)) { + if(!txWriteService.awaitTermination(tmpShutdownTimeout-elapsed, unit)) { log.warn("Transaction service termination: timeout"); @@ -505,7 +506,7 @@ final long elapsed = System.currentTimeMillis() - begin; - if(!readService.awaitTermination(shutdownTimeout-elapsed, unit)) { + if(!readService.awaitTermination(tmpShutdownTimeout-elapsed, unit)) { log.warn("Read service termination: timeout"); @@ -521,7 +522,7 @@ final long elapsed = System.currentTimeMillis() - begin; - final long timeout = shutdownTimeout-elapsed; + final long timeout = tmpShutdownTimeout-elapsed; if (log.isInfoEnabled()) log.info("Awaiting write service termination: will wait " @@ -921,13 +922,13 @@ } /** Counters for {@link #writeService}. */ - protected final WriteTaskCounters countersUN = new WriteTaskCounters(); + final WriteTaskCounters countersUN = new WriteTaskCounters(); /** Counters for the {@link #txWriteService}. */ - protected final TaskCounters countersTX = new TaskCounters(); + final TaskCounters countersTX = new TaskCounters(); /** Counters for the {@link #readService}. 
*/ - protected final TaskCounters countersHR = new TaskCounters(); + final TaskCounters countersHR = new TaskCounters(); /** * Sampling instruments for the various queues giving us the moving average Modified: branches/maven_scaleout/src/main/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -23,7 +23,6 @@ */ package com.bigdata.rdf.load; -import java.io.File; import java.io.FilenameFilter; import java.io.IOException; import java.io.ObjectInputStream; @@ -57,9 +56,11 @@ import com.bigdata.service.jini.JiniFederation; import com.bigdata.service.jini.master.AbstractAsynchronousClientTask; import com.bigdata.service.jini.master.ClientLocator; +import com.bigdata.service.jini.master.FileServer; import com.bigdata.service.jini.master.INotifyOutcome; import com.bigdata.service.jini.master.MappedTaskMaster; import com.bigdata.service.jini.master.TaskMaster; +import java.net.URL; /** * Distributed bulk loader for RDF data. Creates/(re-)opens the @@ -84,7 +85,7 @@ >// extends MappedTaskMaster<S, T, L, U, V> { - final protected static Logger log = Logger + final private static Logger log = Logger .getLogger(MappedRDFDataLoadMaster.class); /** @@ -263,7 +264,7 @@ */ String RDF_FORMAT = "rdfFormat"; - String DEFAULT_RDF_FORMAT = RDFFormat.RDFXML.toString(); + String DEFAULT_RDF_FORMAT = RDFFormat.RDFXML.getName(); // /** // * The maximum #of times an attempt will be made to load any given file. @@ -297,7 +298,7 @@ * * @see ConfigurationOptions#ONTOLOGY */ - public final File ontology; + public final URL ontology; /** * Only files matched by the filter will be processed (optional, but @@ -514,9 +515,9 @@ namespace = (String) config.getEntry(component, ConfigurationOptions.NAMESPACE, String.class); - ontology = (File) config + ontology = (URL) config .getEntry(component, ConfigurationOptions.ONTOLOGY, - File.class, null/* defaultValue */); + URL.class, null/* defaultValue */); ontologyFileFilter = (FilenameFilter) config.getEntry(component, ConfigurationOptions.ONTOLOGY_FILE_FILTER, @@ -583,7 +584,7 @@ final String tmp = (String) config.getEntry(component, ConfigurationOptions.RDF_FORMAT, String.class, - ConfigurationOptions.DEFAULT_RDF_FORMAT.toString()); + ConfigurationOptions.DEFAULT_RDF_FORMAT); if (tmp != null) { @@ -639,13 +640,14 @@ // execute master wait for it to finish. task.execute(); - + } catch (Throwable e) { + e.printStackTrace(); } finally { - + FileServer.stopAll(); fed.shutdown(); } - + } public MappedRDFDataLoadMaster(final JiniFederation fed) @@ -658,6 +660,7 @@ /** * Extended to support optional load, closure, and reporting. */ + @Override protected void runJob() throws Exception { final S jobState = getJobState(); @@ -851,6 +854,7 @@ /** * Extended to open/create the KB. */ + @Override protected void beginJob(final S jobState) throws Exception { super.beginJob(jobState); @@ -900,7 +904,7 @@ loadOntology(tripleStore); } catch (Exception ex) { - + tripleStore.destroy(); // Don't leave badly configured store. 
throw new RuntimeException("Could not load: " + jobState.ontology, ex); Modified: branches/maven_scaleout/src/main/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -20,8 +20,9 @@ import com.bigdata.rdf.rio.RDFParserOptions; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.ScaleOutTripleStore; -import com.bigdata.service.IRemoteExecutor; -import com.bigdata.service.jini.JiniFederation; +import com.bigdata.service.ClientService; +import com.bigdata.service.IBigdataFederation; +import com.bigdata.service.IClientService; import com.bigdata.service.jini.master.AbstractAsynchronousClientTask; import com.bigdata.service.jini.master.ClientLocator; import com.bigdata.service.jini.master.INotifyOutcome; @@ -47,12 +48,11 @@ * @param <V> * The generic type of the client state (stored in zookeeper). */ -public class MappedRDFFileLoadTask<// -S extends JobState,// -V extends Serializable,// -L extends ClientLocator// -> extends AbstractAsynchronousClientTask<Void,V,L>// -implements Serializable { +public class MappedRDFFileLoadTask<S extends JobState, + V extends Serializable, + L extends ClientLocator> + extends AbstractAsynchronousClientTask<Void,V,L>// + implements Serializable { final protected transient static Logger log = Logger .getLogger(MappedRDFFileLoadTask.class); @@ -73,7 +73,7 @@ protected final L locator; /** - * Instantiated by {@link #call()} on the {@link IRemoteExecutor} service. + * Instantiated by {@link #startClientTask()} on the {@link IClientService} service. * This is volatile because it is used by some methods which do not obtain * the {@link #lock}. */ @@ -125,7 +125,7 @@ * to the load balancer). */ private transient volatile CounterSet counters; - + private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); @@ -136,6 +136,7 @@ isDone = false; } + @Override public String toString() { return getClass().getName() + // @@ -165,18 +166,9 @@ } - /** - * The federation object used by the {@link IRemoteExecutor} on which this - * task is executing. - */ - public JiniFederation<?> getFederation() { + protected void setUp(IBigdataFederation federation) + throws InterruptedException { - return (JiniFederation<?>) super.getFederation(); - - } - - protected void setUp() throws InterruptedException { - // set transient fields. // lock = new ReentrantLock(); // allDone = lock.newCondition(); @@ -188,9 +180,9 @@ if (log.isInfoEnabled()) log.info(toString()); - final AbstractTripleStore tripleStore = (AbstractTripleStore) getFederation() - .getResourceLocator().locate(jobState.namespace, - ITx.UNISOLATED); + final AbstractTripleStore tripleStore = (AbstractTripleStore) + federation.getResourceLocator().locate(jobState.namespace, + ITx.UNISOLATED); if (tripleStore == null) { @@ -297,15 +289,14 @@ */ { - final CounterSet serviceRoot = getFederation() - .getServiceCounterSet(); + final CounterSet serviceRoot = federation.getServiceCounterSet(); final String relPath = jobState.jobName; // Create path to counter set. 
final CounterSet tmp = serviceRoot.makePath(relPath); - final CounterSet counters = statementBufferFactory + final CounterSet tmpCounters = statementBufferFactory .getCounters(); // if (log.isDebugEnabled()) @@ -313,7 +304,7 @@ // + counters); // Attach counters [the counters are MOVEd to tmp]. - tmp.attach(counters, true/* replace */); + tmp.attach(tmpCounters, true/* replace */); // Note reference to the current counters for log messages. this.counters = tmp; @@ -347,11 +338,12 @@ } - public Void call() throws Exception { + public Void startClientTask(IBigdataFederation federation, + ClientService clientService) throws Exception { try { - setUp(); + setUp(federation); /* * Wait until either (a) interrupted by the master using @@ -422,8 +414,8 @@ // } /** - * Block until {@link #call()} has fully initialized the instance of this - * class running on the {@link IRemoteExecutor}. This method should be used + * Block until {@link #startClientTask()} has fully initialized the instance of this + * class running on the {@link IClientService}. This method should be used * to guard methods on this or derived classes which can be invoked by RMI * and which depend on {@link #setUp()}. */ Modified: branches/maven_scaleout/src/main/java/com/bigdata/rdf/rules/RDFJoinNexus.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/rdf/rules/RDFJoinNexus.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/rdf/rules/RDFJoinNexus.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -96,11 +96,9 @@ import com.bigdata.relation.rule.eval.AbstractSolutionBuffer; import com.bigdata.relation.rule.eval.ActionEnum; import com.bigdata.relation.rule.eval.DefaultRangeCountFactory; -import com.bigdata.relation.rule.eval.EmptyProgramTask; import com.bigdata.relation.rule.eval.IEvaluationPlanFactory; import com.bigdata.relation.rule.eval.IJoinNexus; import com.bigdata.relation.rule.eval.IJoinNexusFactory; -import com.bigdata.relation.rule.eval.IProgramTask; import com.bigdata.relation.rule.eval.IRangeCountFactory; import com.bigdata.relation.rule.eval.IRuleState; import com.bigdata.relation.rule.eval.IRuleStatisticsFactory; @@ -117,6 +115,7 @@ import com.bigdata.striterator.ChunkedArrayIterator; import com.bigdata.striterator.ChunkedConvertingIterator; import com.bigdata.striterator.DistinctFilter; +import com.bigdata.striterator.EmptyChunkedIterator; import com.bigdata.striterator.IChunkedIterator; import com.bigdata.striterator.IChunkedOrderedIterator; @@ -163,7 +162,6 @@ * {@link IAsynchronousIterator} when wrapped for RMI. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class RDFJoinNexus implements IJoinNexus { @@ -1324,10 +1322,10 @@ final IBuffer<ISolution[]> targetBuffer, final int chunkCapacity) { // MAY be null. - final IElementFilter<ISolution> filter = getSolutionFilter(); + final IElementFilter<ISolution> tmpFilter = getSolutionFilter(); return new UnsynchronizedArrayBuffer<ISolution>(targetBuffer, - chunkCapacity, filter); + chunkCapacity, tmpFilter); } @@ -1351,7 +1349,6 @@ * {@link #flush() flushed}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * @param <E> */ public static class InsertSPOAndJustificationBuffer<E> extends AbstractSolutionBuffer<E> { @@ -1562,6 +1559,22 @@ } + // Code formerly from EmptyProgramTask. 
+ private Object emptyProgram(ActionEnum action, IStep step) { + if (action == null) + throw new IllegalArgumentException(); + if (step == null) + throw new IllegalArgumentException(); + if (step.isRule() || ((IProgram)step).stepCount() != 0) { + throw new IllegalArgumentException(); + } + if (action.isMutation()) { + return Long.valueOf(0L); + } else { + return new EmptyChunkedIterator<ISolution>(null/* keyOrder */); + } + } + @SuppressWarnings("unchecked") public IChunkedOrderedIterator<ISolution> runQuery(final IStep step) throws Exception { @@ -1576,8 +1589,8 @@ log.warn("Empty program"); - return (IChunkedOrderedIterator<ISolution>) new EmptyProgramTask( - ActionEnum.Query, step).call(); + return (IChunkedOrderedIterator<ISolution>) + emptyProgram(ActionEnum.Query, step); } @@ -1694,7 +1707,7 @@ log.warn("Empty program"); - return (Long) new EmptyProgramTask(action, step).call(); + return (Long) emptyProgram(action, step); } @@ -1738,12 +1751,12 @@ if (step == null) throw new IllegalArgumentException(); - final IIndexManager indexManager = getIndexManager(); + final IIndexManager tmpIndexManager = getIndexManager(); - if (indexManager instanceof IBigdataFederation<?>) { + if (tmpIndexManager instanceof IBigdataFederation<?>) { // distributed program execution. - return runDistributedProgram((IBigdataFederation<?>) indexManager, + return runDistributedProgram((IBigdataFederation<?>) tmpIndexManager, action, step); } else { @@ -1766,10 +1779,10 @@ log.info("Running local program: action=" + action + ", program=" + step.getName()); - final IProgramTask innerTask = new ProgramTask(action, step, + final ProgramTask innerTask = new ProgramTask(action, step, getJoinNexusFactory(), getIndexManager()); - return innerTask.call(); + return innerTask.startDataTask(getIndexManager(), null); } @@ -1787,10 +1800,10 @@ } - final IProgramTask innerTask = new ProgramTask(action, step, + final ProgramTask innerTask = new ProgramTask(action, step, getJoinNexusFactory(), getIndexManager()); - return innerTask.call(); + return innerTask.startDataTask(fed, null); } @@ -1816,7 +1829,7 @@ } - final IProgramTask innerTask = new ProgramTask(action, step, + final ProgramTask innerTask = new ProgramTask(action, step, getJoinNexusFactory()); return dataService.submit(innerTask).get(); Modified: branches/maven_scaleout/src/main/java/com/bigdata/rdf/store/AbstractTripleStore.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/rdf/store/AbstractTripleStore.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/rdf/store/AbstractTripleStore.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -1387,6 +1387,9 @@ if (lex != null) lex.destroy(); } + // Remove the triple store from the global row store. 
+ getIndexManager().getGlobalRowStore().delete( + RelationSchema.INSTANCE, getNamespace()); lexiconRelation = null; Modified: branches/maven_scaleout/src/main/java/com/bigdata/rdf/store/DataLoader.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/rdf/store/DataLoader.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/rdf/store/DataLoader.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -36,7 +36,9 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; +import java.net.URISyntaxException; import java.net.URL; +import java.net.URLConnection; import java.util.Arrays; import java.util.LinkedList; import java.util.List; @@ -739,19 +741,19 @@ log.info("loading: " + resource); // try the classpath - InputStream rdfStream = getClass().getResourceAsStream(resource); + URL url = getClass().getResource(resource); - if (rdfStream == null) { + if (url == null) { // Searching for the resource from the root of the class returned // by getClass() (relative to the class' package) failed. // Next try searching for the desired resource from the root // of the jar; that is, search the jar file for an exact match // of the input string. - rdfStream = - getClass().getClassLoader().getResourceAsStream(resource); + url = + getClass().getClassLoader().getResource(resource); - if (rdfStream == null) { + if (url == null) { /* * If we do not find as a Resource then try the file system. @@ -761,8 +763,8 @@ if(file.exists()) { - loadFiles(totals, 0/* depth */, file, baseURL, - rdfFormat, filter, endOfBatch); + loadFiles(totals, 0/* depth */, file.toURI().toURL(), + baseURL, rdfFormat, filter, endOfBatch); return; @@ -775,15 +777,16 @@ * Obtain a buffered reader on the input stream. */ - if (rdfStream == null) { + if (url == null) { throw new IOException("Could not locate resource: " + resource); } - // @todo reuse the backing buffer to minimize heap churn. + URLConnection connection = url.openConnection(); + InputStream is = connection.getInputStream(); final Reader reader = new BufferedReader( - new InputStreamReader(rdfStream) + new InputStreamReader(is) // , 20*Bytes.kilobyte32 // use a large buffer (default is 8k) ); @@ -798,9 +801,7 @@ } finally { reader.close(); - - rdfStream.close(); - + is.close(); } } @@ -826,73 +827,68 @@ * * @throws IOException */ - public LoadStats loadFiles(final File file, final String baseURI, + public LoadStats loadFiles(final URL url, final String baseURI, final RDFFormat rdfFormat, final FilenameFilter filter) throws IOException { - if (file == null) + if (url == null) throw new IllegalArgumentException(); final LoadStats totals = new LoadStats(); - loadFiles(totals, 0/* depth */, file, baseURI, rdfFormat, filter, true/* endOfBatch */ - ); + loadFiles(totals, 0/* depth */, url, baseURI, + rdfFormat, filter, true/* endOfBatch */); return totals; } protected void loadFiles(final LoadStats totals, final int depth, - final File file, final String baseURI, final RDFFormat rdfFormat, + final URL url, final String baseURI, final RDFFormat rdfFormat, final FilenameFilter filter, final boolean endOfBatch) throws IOException { - if (file.isDirectory()) { + // Legacy behavior - allow local files and directories for now, + // but data should only be loaded from outside the cluster, not + // from inside. 
+ if (url.getProtocol().equals("file")) { + File file; + try { + file = new File(url.toURI()); + } catch (URISyntaxException ex) { + throw new IOException("Unable to decode URL", ex); + } + if (file.isDirectory()) { - if (log.isInfoEnabled()) - log.info("loading directory: " + file); + if (log.isInfoEnabled()) + log.info("loading directory: " + file); -// final LoadStats loadStats = new LoadStats(); - - final File[] files = (filter != null ? file.listFiles(filter) + final File[] files = (filter != null ? file.listFiles(filter) : file.listFiles()); - for (int i = 0; i < files.length; i++) { + for (int i = 0; i < files.length; i++) { - final File f = files[i]; + final File f = files[i]; -// final RDFFormat fmt = RDFFormat.forFileName(f.toString(), -// rdfFormat); - - loadFiles(totals, depth + 1, f, baseURI, rdfFormat, filter, + loadFiles(totals, depth + 1, f.toURI().toURL(), baseURI, + rdfFormat, filter, (depth == 0 && i < files.length ? false : endOfBatch)); - - } - - return; - - } - - final String n = file.getName(); - - RDFFormat fmt = RDFFormat.forFileName(n); - if (fmt == null && n.endsWith(".zip")) { - fmt = RDFFormat.forFileName(n.substring(0, n.length() - 4)); - } + } - if (fmt == null && n.endsWith(".gz")) { - fmt = RDFFormat.forFileName(n.substring(0, n.length() - 3)); - } + return; - if (fmt == null) // fallback - fmt = rdfFormat; + } + } + + final String n = url.getPath(); InputStream is = null; try { - is = new FileInputStream(file); + URLConnection connection = url.openConnection(); + is = connection.getInputStream(); if (n.endsWith(".gz")) { @@ -916,23 +912,19 @@ try { // baseURI for this file. @todo do we need to encode this URI? - final String s = baseURI != null ? baseURI : file.toURI() + final String s = baseURI != null ? baseURI : url.toURI() .toString(); - loadData3(totals, reader, s, fmt, endOfBatch); + loadData3(totals, reader, s, rdfFormat, endOfBatch); return; - } catch (Exception ex) { - - throw new RuntimeException("While loading: " + file, ex); - } finally { - reader.close(); - } + } catch (Exception ex) { + throw new RuntimeException("While loading: " + url, ex); } finally { if (is != null) @@ -1359,7 +1351,8 @@ // dataLoader.loadFiles(fileOrDir, null/* baseURI */, // rdfFormat, filter); - dataLoader.loadFiles(totals, 0/* depth */, fileOrDir, baseURI, + dataLoader.loadFiles(totals, 0/* depth */, + fileOrDir.toURI().toURL(), baseURI, rdfFormat, filter, true/* endOfBatch */ ); Modified: branches/maven_scaleout/src/main/java/com/bigdata/relation/locator/DefaultResourceLocator.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/relation/locator/DefaultResourceLocator.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/relation/locator/DefaultResourceLocator.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -239,7 +239,8 @@ final Properties properties = locateResource(namespace, timestamp, foundOn); - if (properties == null) { + // Empty properties may refer to deleted resource. + if (properties == null || properties.isEmpty()) { // Not found by this locator. @@ -426,7 +427,8 @@ final Properties properties = locateResourceOn(indexManager, namespace, timestamp); - if (properties != null) { + // Empty properties may refer to deleted resource. 
+ if (properties != null && !properties.isEmpty()) { if (INFO) { Modified: branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/AbstractStepTask.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/AbstractStepTask.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/AbstractStepTask.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -57,7 +57,6 @@ import com.bigdata.relation.rule.IRule; import com.bigdata.relation.rule.IStep; import com.bigdata.service.DataService; -import com.bigdata.service.DataServiceCallable; import com.bigdata.service.IDataServiceCallable; import com.bigdata.service.ndx.ClientIndexView; import com.bigdata.service.ndx.IClientIndex; @@ -66,7 +65,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -abstract public class AbstractStepTask extends DataServiceCallable<RuleStats> +abstract public class AbstractStepTask implements IStepTask, Cloneable { protected static final transient Logger log = Logger.getLogger(AbstractStepTask.class); @@ -87,7 +86,7 @@ * notices this case and causes <i>this</i> task to be {@link #clone()}ed, * the {@link ExecutorService} set on the clone, and the clone is then * submitted to the {@link ConcurrencyManager} for the {@link DataService}. - * + * * @param action * Indicate whether this is a query or a mutation operation. * @param joinNexusFactory @@ -98,7 +97,7 @@ * @param dataService * non-<code>null</code> iff the caller is already running on * a {@link DataService}. - * + * * @throws IllegalArgumentException * if <i>action</i> is <code>null</code>. * @throws IllegalArgumentException @@ -117,40 +116,36 @@ if (joinNexusFactory == null) throw new IllegalArgumentException(); - + if (step == null) throw new IllegalArgumentException(); this.action = action; - + this.joinNexusFactory = joinNexusFactory; - + this.step = step; - + this.indexManager = indexManager; // @todo MAY be null? - - if (dataService != null) - setDataService(dataService); - } public String toString() { - + return "{" + getClass().getSimpleName() + ", action=" + action + ", step=" + step.getName() + ", joinNexusFactory=" - + joinNexusFactory + ", indexManager=" + indexManager+"}"; - + + joinNexusFactory + ", indexManager=" + indexManager+"}"; + } - + /** * Run program steps in parallel. - * + * * @param program * @param tasks - * + * * @throws InterruptedException * @throws ExecutionException - * + * * @todo adapt the {@link ClientIndexView} code so that we notice all * errors, log them all, and report them all in a single thrown * exception. note that we may be running asynchronously inside of a @@ -164,84 +159,84 @@ protected RuleStats runParallel(final IJoinNexus joinNexus, final IStep program, final List<Callable<RuleStats>> tasks) throws InterruptedException, ExecutionException { - + if (log.isInfoEnabled()) log.info("program=" + program.getName() + ", #tasks=" + tasks.size()); - + if (indexManager == null) throw new IllegalStateException(); - + final RuleStats totals = joinNexus.getRuleStatisticsFactory().newInstance(program); - + final ExecutorService service = indexManager.getExecutorService(); - + // submit tasks and await their completion. final List<Future<RuleStats>> futures = service.invokeAll(tasks); - + // verify no problems with tasks. 
for (Future<RuleStats> f : futures) { - + final RuleStats tmp = f.get(); - + totals.add(tmp); - + } - + if (log.isInfoEnabled()) log.info("program=" + program.getName() + ", #tasks=" + tasks.size() + " - done"); - + return totals; - + } /** * Run program steps in sequence. - * + * * @param program * @param tasks - * - * @return + * + * @return * @throws InterruptedException * @throws ExecutionException */ protected RuleStats runSequential(final IJoinNexus joinNexus, final IStep program, final List<Callable<RuleStats>> tasks) throws InterruptedException, ExecutionException { - + final int ntasks = tasks.size(); - + if (log.isInfoEnabled()) log.info("program=" + program.getName() + ", #tasks=" + ntasks); - + if (indexManager == null) throw new IllegalStateException(); - + final ExecutorService service = indexManager.getExecutorService(); - + final RuleStats totals = joinNexus.getRuleStatisticsFactory().newInstance(program); - + final Iterator<Callable<RuleStats>> itr = tasks.iterator(); - + int n = 0; - + while (itr.hasNext()) { - + final Callable<RuleStats> task = itr.next(); - + /* * Submit and wait for the future. - * + * * Note: tasks that are run in a sequential program are required to * flush the buffer so that all solutions are available for the next * step of the program. This is critical for programs that have * dependencies between their steps. -// * +// * // * Note: This is handled by the task factory. */ final RuleStats tmp = service.submit(task).get(); - + totals.add(tmp); n++; @@ -258,9 +253,9 @@ if (log.isInfoEnabled()) log.info("program=" + program.getName() + ", #tasks=" + ntasks + " - done"); - + return totals; - + } /** @@ -272,10 +267,10 @@ * layering of the {@link RuleStats} (this is due to a coupling between the * {@link RuleStats} reporting structure and the control structure for * executing the tasks). - * + * * @param program * @param tasks - * + * * @return * @throws InterruptedException * @throws ExecutionException @@ -283,21 +278,21 @@ protected RuleStats runOne(final IJoinNexus joinNexus, final IStep program, final Callable<RuleStats> task) throws InterruptedException, ExecutionException { - + if (log.isInfoEnabled()) log.info("program=" + program.getName()); - + if (indexManager == null) throw new IllegalStateException(); - + /* * Submit and wait for the future. - * + * * Note: tasks that are run in a sequential (or as a single task) * program are required to flush the buffer so that all solutions are * available for the next step of the program. This is critical for * programs that have dependencies between their steps. - * + * * Note: This is handled by the task factory. */ // final ExecutorService service = indexManager.getExecutorService(); @@ -311,9 +306,9 @@ if (log.isInfoEnabled()) log.info("program=" + program.getName() + " - done"); - + return stats; - + } /** @@ -337,21 +332,21 @@ * {@link AbstractTask} will wind up using an {@link IClientIndex} view and * lose the benefits of access to unisolated indices. 
*/ - public Future<RuleStats> submit() { + public Future<RuleStats> submit(DataService dataService) { - if (!isDataService()) { + if (dataService == null) { return indexManager.getExecutorService().submit(this); } - return submitToConcurrencyManager(); - + return submitToConcurrencyManager(dataService); + } - - private Future<RuleStats> submitToConcurrencyManager() { - - if (!isDataService()) + + private Future<RuleStats> submitToConcurrencyManager(DataService dataService) { + + if (dataService == null) throw new IllegalStateException(); final ProgramUtility util = new ProgramUtility(); @@ -374,40 +369,40 @@ } } - + if(log.isInfoEnabled()) { log.info("running w/ concurrency control: " + this); - + } /* * The index names must be gathered from each relation on which the task * will write so that they can be declared. - * + * * Note: We can't just pick and choose using the access paths since we * do not know how the propagation of bindings will effect access path * selection so we need a lock on all of the indices before the task can * run (at least, before it can run if it is a writer - no locks are * required for query). - * + * * 1. Find the distinct relations that are used by the rules. - * + * * 2. Collect the names of the indices maintained by those relations. - * + * * 3. Declare the indices since the task will need an exclusive lock on * them (mutation) or at least the ability to read from those indices * (query). - * + * * Note: if an index is not found on the live journal then it will be * resolved against the federation (if running in a federation). This * means that the task will run with the live index objects when they * are local and with IClientIndex objects when the index is remote. - * + * * Note: In general, mixtures of live and remote index objects do not * occur since indices are either partitioned (a federation) or * monolithic (a Journal). - * + * * Note: You CAN place indices onto specific data services running on a * set of machines and set [enableOverflow := false] such that the * indices never become partitioned. In that case you can have optimized @@ -421,34 +416,34 @@ // final long timestamp; // { -// +// // // flyweight instance. // IJoinNexus joinNexus = joinNexusFactory.newInstance(indexManager); -// +// // // choose timestamp based on more recent view required. // timestamp = action.isMutation() ? joinNexus.getWriteTimestamp() // : joinNexus.getReadTimestamp(); -// +// // } // // if(log.isInfoEnabled()) { -// +// // log.info("timestamp="+timestamp+", task="+this); -// +// // } /* * The set of indices that we need to declare for the task. */ final Set<String> indexNames = new HashSet<String>(); - + if(action.isMutation()) { - + /* * Obtain the name of each index for which we want write access. * These are the indices for the relations named in the head of each * rule. - * + * * Note: We are not actually issuing any tasks here, just * materializing relation views so that we can obtain the names of * the indices required for those views in order to declare them to @@ -459,11 +454,11 @@ */ final Map<String, IRelation> tmpRelations = getWriteRelations( indexManager, step, ITx.UNISOLATED); - + // Collect names of the required indices. final Set<String> writeIndexNames = getIndexNames(tmpRelations .values()); - + indexNames.addAll(writeIndexNames); } @@ -474,7 +469,7 @@ * Obtain the name of each index for which we want read access. * These are the indices for the relation view(s) named in the tails * of each rule. 
- * + * * Note: We are not actually issuing any tasks here, just * materializing relation views so that we can obtain the names of * the indices required for those views. UNISOLATED is always safe @@ -483,18 +478,18 @@ */ final Map<String, IRelation> tmpRelations = getReadRelations( indexManager, step, ITx.UNISOLATED); - + // Collect names of the required indices. final Set<String> readIndexNames = getIndexNames(tmpRelations .values()); - + indexNames.addAll(readIndexNames); - + } - + final String[] resource; { - + // The set of indices that the task will declare. resource = indexNames.toArray(new String[] {}); @@ -511,18 +506,18 @@ * choice is whether or not the task is UNISOLATED (an unisolated task * will obtain exclusive locks on the live indices declared by the * task). - * + * * A mutation task runs with the writeTimestamp. - * + * * A query task runs with the readTimestamp. - * + * * @todo handle transactions in this context. */ final long timestamp; { - + // final IJoinNexus joinNexus = joinNexusFactory.newInstance(indexManager); - + if (action.isMutation()) { timestamp = joinNexusFactory.getWriteTimestamp(); @@ -531,7 +526,7 @@ timestamp = joinNexusFactory.getReadTimestamp(); // timestamp = ITx.READ_COMMITTED; - + } if (log.isInfoEnabled()) { @@ -539,21 +534,21 @@ log.info("timestamp=" + timestamp + ", task=" + this); } - + } - + /* * Create the inner task. A clone is used to prevent possible side * effects on the original task. - * + * * Note: The [timestamp] was choosen above. The writeTimestamp iff this * is a mutation operation and the [readTimestamp] otherwise. */ final AbstractStepTask innerTask = this.clone(); - final IConcurrencyManager concurrencyManager = getDataService() - .getConcurrencyManager(); - + final IConcurrencyManager concurrencyManager = + dataService.getConcurrencyManager(); + final AbstractTask task = new AbstractTask(concurrencyManager, timestamp, resource) { @@ -570,7 +565,7 @@ * them and are running an UNISOLATED AbstractTask). */ innerTask.indexManager = getJournal(); - + return innerTask.call(); } @@ -580,9 +575,9 @@ if(log.isInfoEnabled()) { log.info("running on concurrencyManager: " + this); - + } - + /* * Run on the concurrency manager. */ @@ -618,20 +613,20 @@ protected Set<String> getWriteRelationNames(IStep step) { final Set<String> c = new HashSet<String>(); - + getWriteRelationNames(step, c); if(log.isDebugEnabled()) { - + log.debug("Found " + c.size() + " relations, program=" + step.getName()); - + } return c; - + } - + private void getWriteRelationNames(IStep p, Set<String> c) { if (p.isRule()) { @@ -641,11 +636,11 @@ if (r.getHead() == null) throw new IllegalArgumentException( "No head for this rule: rule=" + p); - + c.add(r.getHead().getOnlyRelationName()); } else { - + final Iterator<IStep> itr = ((IProgram)p).steps(); while (itr.hasNext()) { @@ -657,15 +652,15 @@ } } - + /** * Locate the distinct relation identifiers corresponding to the head of * each rule and resolve them to their relations. - * + * * @param timestamp * The timestamp associated with the relation views on which the * rule(s) will write. - * + * * @throws RuntimeException * if any relation can not be resolved. */ @@ -711,7 +706,7 @@ } } else { - + final Iterator<IStep> itr = ((IProgram)p).steps(); while (itr.hasNext()) { @@ -723,12 +718,12 @@ } } - + /** * Locate the distinct relation identifiers corresponding to the tail(s) of * each rule and resolve them to their relations. Note that a tail predicate * can read on a fused view of more than one relation. 
- * + * * @throws RuntimeException * if any relation can not be resolved. */ @@ -784,11 +779,11 @@ } } - + } } else { - + final Iterator<IStep> itr = ((IProgram)p).steps(); while (itr.hasNext()) { @@ -800,13 +795,13 @@ } } - + /** * Create the appropriate buffers to absorb writes by the rules in the * program that target an {@link IMutableRelation}. - * + * * @return the map from relation identifier to the corresponding buffer. - * + * * @throws IllegalStateException * if the program is being executed as mutation. * @throws RuntimeException @@ -820,13 +815,13 @@ if (!action.isMutation()) { throw new IllegalStateException(); - + } if(log.isDebugEnabled()) { - + log.debug(""); - + } final Map<String, IBuffer<ISolution[]>> c = new HashMap<String, IBuffer<ISolution[]>>( @@ -846,45 +841,45 @@ final IBuffer<ISolution[]> buffer; switch (action) { - + case Insert: - + buffer = joinNexus.newInsertBuffer((IMutableRelation)relation); - + break; - + case Delete: - + buffer = joinNexus.newDeleteBuffer((IMutableRelation)relation); - + break; - + default: - + throw new AssertionError("action=" + action); - + } c.put(relationIdentifier, buffer); - + } if(log.isDebugEnabled()) { - + log.debug("Created "+c.size()+" mutation buffers: action="+action); - + } return c; - + } - + /** * Returns the names of the indices maintained by the relations. - * + * * @param c * A collection of {@link IRelation}s. - * + * * @return The names of the indices maintained by those relations. */ @SuppressWarnings("unchecked") @@ -897,19 +892,19 @@ return Collections.EMPTY_SET; final Set<String> set = new HashSet<String>(); - + final Iterator<IRelation> itr = c.iterator(); - + while(itr.hasNext()) { - + final IRelation relation = itr.next(); - + set.addAll(relation.getIndexNames()); - + } return set; - + } } Deleted: branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/EmptyProgramTask.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/EmptyProgramTask.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/EmptyProgramTask.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -1,64 +0,0 @@ -package com.bigdata.relation.rule.eval; - -import com.bigdata.relation.rule.IProgram; -import com.bigdata.relation.rule.IStep; -import com.bigdata.striterator.EmptyChunkedIterator; - - -/** - * Provides execution for an "empty" program. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public class EmptyProgramTask implements IProgramTask { - - final ActionEnum action; - - final IStep program; - - /** - * - * @param action - * @param step - * - * @throws IllegalArgumentException - * if any argument is <code>null</code>. - * @throws IllegalArgumentException - * unless the <i>step</i> is an empty {@link IProgram}. 
- */ - public EmptyProgramTask(ActionEnum action, IStep step) { - - if (action == null) - throw new IllegalArgumentException(); - - if (step == null) - throw new IllegalArgumentException(); - - if (step.isRule() || ((IProgram)step).stepCount() != 0) { - - throw new IllegalArgumentException(); - - } - - this.action = action; - - this.program = step; - - } - - public Object call() { - - if (action.isMutation()) { - - return Long.valueOf(0L); - - } else { - - return new EmptyChunkedIterator<ISolution>(null/* keyOrder */); - - } - - } - -} \ No newline at end of file Deleted: branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/IProgramTask.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/IProgramTask.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/IProgramTask.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -1,54 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ -/* - * Created on Jun 27, 2008 - */ - -package com.bigdata.relation.rule.eval; - -import java.util.concurrent.Callable; - -import com.bigdata.striterator.IChunkedOrderedIterator; - -/** - * Interface for a task that executes a (complex) program (vs a single rule). - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public interface IProgramTask extends Callable<Object> { - - /** - * The return will be either an {@link IChunkedOrderedIterator} (for - * {@link ActionEnum#Query}) or a {@link Long} element mutation count (for - * {@link ActionEnum#Insert} or {@link ActionEnum#Delete}). 
- * - * @return - * - * @throws Exception - */ - public Object call() throws Exception; - -} Modified: branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/ProgramTask.java =================================================================== --- branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/ProgramTask.java 2010-09-01 14:39:13 UTC (rev 3480) +++ branches/maven_scaleout/src/main/java/com/bigdata/relation/rule/eval/ProgramTask.java 2010-09-01 16:24:23 UTC (rev 3481) @@ -30,7 +30,6 @@ import java.io.IOException; import java.util.Iterator; -import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; @@ -51,16 +50,16 @@ import com.bigdata.relation.rule.IRule; import com.bigdata.relation.rule.IStep; import com.bigdata.service.DataService; -import com.bigdata.service.DataServiceCallable; import com.bigdata.service.IBigdataFederation; import com.bigdata.service.IDataService; +import com.bigdata.service.IDataServiceCallable; import com.bigdata.striterator.IChunkedOrderedIterator; import com.bigdata.striterator.ICloseableIterator; /** * Task for executing a program when all of the indices for the relation are * co-located on the same {@link DataService}. - * + * * @todo Named result sets. This would provide a means to run a IRuleTask and * cache the output for further evaluation... [truncated message content] |
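The DataLoader portion of this change (rev 3481) replaces InputStream-based resource lookup with URL-based lookup, so that classpath entries, jar entries, local files and remote resources all flow through the same read path. Below is a minimal sketch of that fallback order; ResourceResolver and its method names are illustrative stand-ins, not part of the commit.

package com.bigdata.rdf.store;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;

/**
 * Minimal sketch of the lookup order the DataLoader patch implements.
 * The class and method names here are illustrative only.
 */
public class ResourceResolver {

    static URL resolve(final Class<?> cls, final String resource)
            throws IOException {

        // 1. Relative to the caller's package on the classpath.
        URL url = cls.getResource(resource);

        if (url == null) {
            // 2. From the root of the classpath (exact match in a jar).
            url = cls.getClassLoader().getResource(resource);
        }

        if (url == null) {
            // 3. Fall back to the local file system.
            final File file = new File(resource);
            if (file.exists()) {
                url = file.toURI().toURL();
            }
        }

        if (url == null)
            throw new IOException("Could not locate resource: " + resource);

        return url;
    }

    /**
     * All sources are then read uniformly: local files are converted to
     * file: URLs and opened via a URLConnection, just like remote ones.
     */
    static InputStream open(final URL url) throws IOException {
        return url.openConnection().getInputStream();
    }
}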
From: <mar...@us...> - 2010-09-01 14:39:19
Revision: 3480 http://bigdata.svn.sourceforge.net/bigdata/?rev=3480&view=rev Author: martyncutcher Date: 2010-09-01 14:39:13 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Add tests for AllocationContexts Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2010-09-01 14:27:44 UTC (rev 3479) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2010-09-01 14:39:13 UTC (rev 3480) @@ -44,7 +44,9 @@ import com.bigdata.journal.AbstractRestartSafeTestCase; import com.bigdata.journal.BufferMode; import com.bigdata.journal.DiskOnlyStrategy; +import com.bigdata.journal.IAllocationContext; import com.bigdata.journal.Journal; +import com.bigdata.journal.JournalShadow; import com.bigdata.journal.RWStrategy; import com.bigdata.journal.TestJournalBasics; import com.bigdata.journal.Journal.Options; @@ -380,10 +382,10 @@ RWStore rw = bufferStrategy.getRWStore(); ArrayList<Integer> sizes = new ArrayList<Integer>(); TreeMap<Long, Integer> paddrs = new TreeMap<Long, Integer>(); - for (int i = 0; i < 1000000; i++) { + for (int i = 0; i < 100000; i++) { int s = r.nextInt(250); sizes.add(s); - int a = rw.alloc(s); + int a = rw.alloc(s, null); long pa = rw.physicalAddress(a); assertTrue(paddrs.get(pa) == null); paddrs.put(pa, a); @@ -392,7 +394,7 @@ for (int i = 0; i < 50; i++) { int s = r.nextInt(500); sizes.add(s); - int a = rw.alloc(s); + int a = rw.alloc(s, null); long pa = rw.physicalAddress(a); paddrs.put(pa, a); } @@ -442,9 +444,9 @@ } long allocBatch(RWStore rw, int bsize, int asze, int ainc) { - long curAddress = rw.physicalAddress(rw.alloc(asze)); + long curAddress = rw.physicalAddress(rw.alloc(asze, null)); for (int i = 1; i < bsize; i++) { - int a = rw.alloc(asze); + int a = rw.alloc(asze, null); long nxt = rw.physicalAddress(a); assertTrue("Problem with index: " + i, (curAddress+ainc) == nxt || (nxt % 8192 == 0)); curAddress = nxt; @@ -460,7 +462,7 @@ r.nextBytes(batchBuffer); for (int i = 0; i < bsize; i++) { int as = base + r.nextInt(scope); - retaddrs[i] = (int) rw.alloc(batchBuffer, as); + retaddrs[i] = (int) rw.alloc(batchBuffer, as, null); } return retaddrs; @@ -499,12 +501,12 @@ private long reallocBatch(RWStore rw, int tsts, int sze, int grp) { long[] addr = new long[grp]; for (int i = 0; i < grp; i++) { - addr[i] = rw.alloc(sze); + addr[i] = rw.alloc(sze, null); } for (int t = 0; t < tsts; t++) { for (int i = 0; i < grp; i++) { long old = addr[i]; - addr[i] = rw.alloc(sze); + addr[i] = rw.alloc(sze, null); rw.free(old, sze); } } @@ -711,7 +713,6 @@ showStore(store); store.close(); - System.out.println("Re-open Journal"); store = (Journal) getStore(); showStore(store); @@ -843,11 +844,15 @@ // now delete the memory bs.delete(faddr); + // since deferred frees, we must commit in order to ensure the + // address in invalid, indicating it is available for + bs.commit(); + try { rdBuf = bs.read(faddr); // should fail with illegal state throw new RuntimeException("Fail"); } catch (Exception ise) { - assertTrue("Expected IllegalStateException", ise instanceof IllegalStateException); + assertTrue("Expected IllegalStateException reading from " + (faddr >> 32) + " instead got: " + ise, ise instanceof IllegalStateException); } } finally { @@ -932,9 +937,97 @@ 
store.destroy(); } } + + static class DummyAllocationContext implements IAllocationContext { + static int s_id = 23; + + int m_id = s_id++; - public void test_stressAlloc() { + public int compareTo(Object o) { + if (o instanceof DummyAllocationContext) { + return m_id - ((DummyAllocationContext) o).m_id; + } else { + return -1; + } + } + + public long minimumReleaseTime() { + return 0; // indicates immediate release + } + + } + + /** + * From a RWStore, creates multiple AllocationContexts to isolate + * updates, re-allocate storage and protect against by concurrent + * Contexts. This is the core functionality required to support + * Transactions. + * + * If an allocation is made for an AllocationContext then this will + * result in a ContextAllocation object being created in the RWStore + * within which "shadow" allocations can be made. If such a shadow + * allocation is deleted, within the AllocationContext, then this + * can be removed immediately. + * + * @throws IOException + */ + public void test_allocationContexts() throws IOException { + Journal store = (Journal) getStore(); + + RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + + RWStore rw = bs.getRWStore(); + // JournalShadow shadow = new JournalShadow(store); + + // Create a couple of contexts + IAllocationContext allocContext1 = new DummyAllocationContext(); + IAllocationContext allocContext2 = new DummyAllocationContext(); + + int sze = 650; + byte[] buf = new byte[sze+4]; // extra for checksum + r.nextBytes(buf); + + long addr1a = bs.write(ByteBuffer.wrap(buf), allocContext1); + long addr1b = bs.write(ByteBuffer.wrap(buf), allocContext1); + rw.detachContext(allocContext1); + + + long addr2a = bs.write(ByteBuffer.wrap(buf), allocContext2); + long addr2b = bs.write(ByteBuffer.wrap(buf), allocContext2); + rw.detachContext(allocContext2); + + // Re-establish context + long addr1c = bs.write(ByteBuffer.wrap(buf), allocContext1); + + // By detaching contexts we end up using the same allocator + assertTrue("allocator re-use", bs.getPhysicalAddress(addr1c) > bs.getPhysicalAddress(addr2b)); + + // Now, prior to commit, try deleting an uncommitted allocation + bs.delete(addr1c, allocContext1); + // and re-allocating it from the same context + long addr1d = bs.write(ByteBuffer.wrap(buf), allocContext1); + + assertTrue("re-allocation", addr1c==addr1d); + + rw.detachContext(allocContext1); + + // Now commit + store.commit(); + + // now try deleting and re-allocating again, but in a global context + bs.delete(addr1d); // this should call deferFree + long addr1e = bs.write(ByteBuffer.wrap(buf), allocContext1); + + assertTrue("deferred-delete", addr1e != addr1d); + + // Now commit + store.commit(); + + } + + public void test_stressAlloc() { + Journal store = (Journal) getStore(); RWStrategy bs = (RWStrategy) store.getBufferStrategy(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
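The new test_allocationContexts test exercises the core shadow-allocation contract: writes made under an IAllocationContext use allocators private to that context, a delete of an uncommitted allocation within the same context frees storage immediately (so the slot is re-issued at the same address), and a global delete after a commit is deferred. The following condensed sketch of that flow uses only the write/delete/detachContext signatures shown in the diff; TxContext and demo are illustrative names, not part of the commit.

package com.bigdata.journal;

import java.nio.ByteBuffer;

import com.bigdata.rwstore.RWStore;

public class ShadowAllocationExample {

    /** Trivial context: immediate release, ordered by creation id. */
    static class TxContext implements IAllocationContext {
        private static int nextId = 1;
        private final int id = nextId++;
        public int compareTo(final Object o) {
            return (o instanceof TxContext) ? id - ((TxContext) o).id : -1;
        }
        public long minimumReleaseTime() {
            return 0; // freed allocations may be released immediately.
        }
    }

    static void demo(final Journal store, final byte[] payload) {

        final RWStrategy bs = (RWStrategy) store.getBufferStrategy();
        final RWStore rw = bs.getRWStore();

        final IAllocationContext tx = new TxContext();

        // Writes under a context are shadowed: the allocators used are
        // private to the context until it is detached.
        final long addr = bs.write(ByteBuffer.wrap(payload), tx);

        // Deleting an uncommitted allocation within the same context
        // frees it at once, so the same slot is handed back ...
        bs.delete(addr, tx);
        final long addr2 = bs.write(ByteBuffer.wrap(payload), tx);
        assert addr2 == addr; // mirrors the "re-allocation" assert.

        // Detaching returns the shadow allocators to the store.
        rw.detachContext(tx);
        store.commit();

        // After the commit, a global delete is deferred rather than
        // immediate, so a subsequent re-write gets a different address.
        bs.delete(addr2);
    }
}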
From: <mar...@us...> - 2010-09-01 14:27:53
Revision: 3479
http://bigdata.svn.sourceforge.net/bigdata/?rev=3479&view=rev
Author: martyncutcher
Date: 2010-09-01 14:27:44 +0000 (Wed, 01 Sep 2010)

Log Message:
-----------
Support RootBlock reference from CommitRecords

Added Paths:
-----------
    branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockCommitter.java

Added: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockCommitter.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockCommitter.java	(rev 0)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockCommitter.java	2010-09-01 14:27:44 UTC (rev 3479)
@@ -0,0 +1,61 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved.
+
+Contact:
+    SYSTAP, LLC
+    4501 Tower Road
+    Greensboro, NC 27410
+    lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+package com.bigdata.journal;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Provides the callback to save the previous root block and store the address
+ * with the current CommitRecord. This enables access to historical root blocks
+ * since the next CommitRecord is accessible from the CommitRecordIndex. This
+ * is effective if slightly circuitous.
+ *
+ * @author Martyn Cutcher
+ *
+ */
+public class RootBlockCommitter implements ICommitter {
+	final AbstractJournal journal;
+	
+	public RootBlockCommitter(AbstractJournal journal) {
+		this.journal = journal;
+	}
+	
+	/**
+	 * Write the current root block to the Journal and return its address
+	 * to be stored in the CommitRecord.
+	 */
+	public long handleCommit(long commitTime) {
+		ByteBuffer rbv = journal.getRootBlockView().asReadOnlyBuffer();
+
+		ByteBuffer bb = ByteBuffer.allocate(rbv.capacity());
+		for (int i = 0; i < rbv.capacity(); i++) {
+			bb.put(rbv.get());
+		}
+		bb.flip();
+
+		return journal.write(bb);
+	}
+
+}

Property changes on: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockCommitter.java
___________________________________________________________________
Added: svn:mime-type
   + text/plain
Added: svn:keywords
   + Id Date Revision Author HeadURL
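Registering this committer under a root-address slot means every commit record carries the address of the root block that preceded it, so historical root blocks can be recovered by walking the CommitRecordIndex. The sketch below shows the retrieval side; it assumes the PREV_ROOTBLOCK slot, CommitRecordIndex#findNext() and the RootBlockView constructor introduced by the companion revision 3477, and priorRootBlock (and the accessibility of the commit record index) are illustrative assumptions.

package com.bigdata.journal;

import java.nio.ByteBuffer;

public class PriorRootBlockLookup {

    static IRootBlockView priorRootBlock(final AbstractJournal journal,
            final long commitTime) {

        // The root block in force *before* commit N is written during
        // commit N, so its address hangs off the *next* commit record.
        final ICommitRecord rec = journal.getCommitRecordIndex()
                .findNext(commitTime);

        if (rec == null)
            return null; // no commit record for that commit time.

        final long addr = rec.getRootAddr(AbstractJournal.PREV_ROOTBLOCK);

        if (addr == 0L)
            return null; // slot not populated.

        final ByteBuffer bb = journal.read(addr);

        // Checksum checker omitted for brevity (an assumption); the
        // journal's own getRootBlock(commitTime) passes its checker.
        return new RootBlockView(true/* rb0 */, bb, null/* checker */);
    }
}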
From: <mar...@us...> - 2010-09-01 14:22:45
Revision: 3478
http://bigdata.svn.sourceforge.net/bigdata/?rev=3478&view=rev
Author: martyncutcher
Date: 2010-09-01 14:22:39 +0000 (Wed, 01 Sep 2010)

Log Message:
-----------
Support for CommitRecord linked delete blocks

Added Paths:
-----------
    branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/DeleteBlockCommitter.java

Added: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/DeleteBlockCommitter.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/DeleteBlockCommitter.java	(rev 0)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/DeleteBlockCommitter.java	2010-09-01 14:22:39 UTC (rev 3478)
@@ -0,0 +1,45 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved.
+
+Contact:
+    SYSTAP, LLC
+    4501 Tower Road
+    Greensboro, NC 27410
+    lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+package com.bigdata.journal;
+
+/**
+ * Defines the callback object called on commit that enables the deferred
+ * delete blocks to be associated with a CommitRecord.
+ *
+ * @author Martyn Cutcher
+ *
+ */
+public class DeleteBlockCommitter implements ICommitter {
+
+	private RWStrategy m_strategy;
+
+	public DeleteBlockCommitter(RWStrategy strategy) {
+		m_strategy = strategy;
+	}
+	public long handleCommit(long commitTime) {
+		return m_strategy.saveDeleteBlocks();
+	}
+
+}

Property changes on: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/DeleteBlockCommitter.java
___________________________________________________________________
Added: svn:mime-type
   + text/plain
Added: svn:keywords
   + Id Date Revision Author HeadURL
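The class is only a callback; the behavior comes from registering it in a commit-record slot so that each commit persists the block of deferred deletes and records its address. The sketch below shows that wiring, assuming the DELETEBLOCK slot and the setCommitter() call from the companion revision 3477; wireDeferredDeletes and deleteBlockAddrOf are illustrative names, not part of the commit.

package com.bigdata.journal;

public class DeferredDeleteWiring {

    static void wireDeferredDeletes(final AbstractJournal journal,
            final RWStrategy strategy) {

        // Registered once at startup. On each commit the journal calls
        // handleCommit(), which persists the block of deferred deletes
        // and returns its address; the journal stores that address in
        // the DELETEBLOCK slot of the new commit record.
        journal.setCommitter(AbstractJournal.DELETEBLOCK,
                new DeleteBlockCommitter(strategy));
    }

    static long deleteBlockAddrOf(final ICommitRecord rec) {

        // A later recycler can read the slot back from any historical
        // commit record to locate addresses eligible for release.
        return rec.getRootAddr(AbstractJournal.DELETEBLOCK);
    }
}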
From: <mar...@us...> - 2010-09-01 14:19:14
Revision: 3477 http://bigdata.svn.sourceforge.net/bigdata/?rev=3477&view=rev Author: martyncutcher Date: 2010-09-01 14:19:05 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Commit inital implementaitons of shadow allocations and CommitRecord-based deleteBlocks Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/Node.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/CommitRecordIndex.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/FileMetadata.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAtomicStore.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalTransactionService.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/AbstractRawStore.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/IRawStore.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedOutputStream.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/IStore.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/PSOutputStream.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java Added Paths: ----------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAllocationContext.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalShadow.java Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/Node.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/Node.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/btree/Node.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -862,12 +862,13 @@ // Clear the old key. data.childAddr[i] = NULL; - if(btree.storeCache!=null) { + if (btree.storeCache!=null) { // remove from cache. btree.storeCache.remove(oldChildAddr); } // free the oldChildAddr if the Strategy supports it - btree.store.delete(oldChildAddr); + if (true) btree.store.delete(oldChildAddr); + // System.out.println("Deleting " + oldChildAddr); // Stash reference to the new child. 
// childRefs[i] = btree.newRef(newChild); Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -1436,7 +1436,7 @@ final WriteCache cache = acquireForWriter(); try { - debugAddrs(offset, 0, 'A'); + debugAddrs(offset, data.remaining(), 'A'); // write on the cache. if (cache.write(offset, data, chk, useChecksum)) { @@ -1982,7 +1982,7 @@ } if (addrsUsed[i] == paddr) { ret.append(addrActions[i]); - if (addrActions[i]=='W') { + if (addrActions[i]=='A') { ret.append("[" + addrLens[i] + "]"); } } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -677,4 +677,8 @@ public void setTransactionManager(AbstractLocalTransactionManager localTransactionManager) { // NOP } + + public void setCommitRecordIndex(CommitRecordIndex commitRecordIndex) { + // NOP + } } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -37,6 +37,7 @@ import java.nio.ByteBuffer; import java.nio.channels.Channel; import java.nio.channels.FileChannel; +import java.util.Iterator; import java.util.Properties; import java.util.UUID; import java.util.concurrent.Callable; @@ -222,6 +223,18 @@ public static transient final int ROOT_NAME2ADDR = 0; /** + * The index of the address where the root block copy from the previous + * commit is stored + */ + public static transient final int PREV_ROOTBLOCK = 1; + + /** + * The index of the address of the delete blocks associated with + * this transaction + */ + public static transient final int DELETEBLOCK = 2; + + /** * A clone of the properties used to initialize the {@link Journal}. */ final protected Properties properties; @@ -520,6 +533,73 @@ } /** + * Return the root block view associated with the commitRecord for the + * provided commit time. This requires accessing the next commit record + * since the previous root block is stored with each record. + * + * @param commitTime + * A commit time. + * + * @return The root block view -or- <code>null</code> if there is no commit + * record for that commitTime. 
+ * + */ + public IRootBlockView getRootBlock(final long commitTime) { + + final ICommitRecord commitRecord = getCommitRecordIndex().findNext(commitTime); + + if (commitRecord == null) { + return null; + } + + final long rootBlockAddr = commitRecord.getRootAddr(PREV_ROOTBLOCK); + + if (rootBlockAddr == 0) { + return null; + } else { + ByteBuffer bb = read(rootBlockAddr); + + return new RootBlockView(true /* rb0 - WTH */, bb, checker); + } + + } + + /** + * + * @param startTime from which to begin iteration + * + * @return an iterator over the committed root blocks + */ + public Iterator<IRootBlockView> getRootBlocks(final long startTime) { + return new Iterator<IRootBlockView>() { + ICommitRecord commitRecord = getCommitRecordIndex().findNext(startTime); + + public boolean hasNext() { + return commitRecord != null; + } + + public IRootBlockView next() { + final long rootBlockAddr = commitRecord.getRootAddr(PREV_ROOTBLOCK); + + commitRecord = getCommitRecordIndex().findNext(commitRecord.getTimestamp()); + + if (rootBlockAddr == 0) { + return null; + } else { + ByteBuffer bb = read(rootBlockAddr); + + return new RootBlockView(true /* rb0 - WTH */, bb, checker); + } + } + + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } + + /** * True iff the journal was opened in a read-only mode. */ private final boolean readOnly; @@ -905,6 +985,8 @@ _bufferStrategy = new RWStrategy(fileMetadata, quorum); this._rootBlock = fileMetadata.rootBlock; + + setCommitter(DELETEBLOCK, new DeleteBlockCommitter((RWStrategy) _bufferStrategy)); break; @@ -961,6 +1043,8 @@ // report event. ResourceManager.openJournal(getFile() == null ? null : getFile().toString(), size(), getBufferStrategy() .getBufferMode()); + + this._bufferStrategy.setCommitRecordIndex(_commitRecordIndex); } finally { @@ -2017,6 +2101,8 @@ // clear reference and reload from the store. _commitRecordIndex = _getCommitRecordIndex(); + + _bufferStrategy.setCommitRecordIndex(_commitRecordIndex); // clear the array of committers. _committers = new ICommitter[_committers.length]; @@ -2509,7 +2595,7 @@ public long write(final ByteBuffer data) { - assertCanRead(); + assertCanWrite(); return _bufferStrategy.write(data); @@ -2524,7 +2610,15 @@ } - // Note: NOP for WORM. Used by RW for eventual recycle protocol. + public long write(ByteBuffer data, final long oldAddr, IAllocationContext context) { + return _bufferStrategy.write(data, oldAddr, context); + } + + public long write(ByteBuffer data, IAllocationContext context) { + return _bufferStrategy.write(data, context); + } + + // Note: NOP for WORM. Used by RW for eventual recycle protocol. 
public void delete(final long addr) { assertCanWrite(); @@ -2533,6 +2627,18 @@ } + public void delete(final long addr, IAllocationContext context) { + + assertCanWrite(); + + _bufferStrategy.delete(addr, context); + + } + + public void detachContext(IAllocationContext context) { + _bufferStrategy.detachContext(context); + } + final public long getRootAddr(final int index) { final ReadLock lock = _fieldReadWriteLock.readLock(); @@ -2660,6 +2766,11 @@ */ setupName2AddrBTree(getRootAddr(ROOT_NAME2ADDR)); + + /** + * Register committer to write previous root block + */ + setCommitter(PREV_ROOTBLOCK, new RootBlockCommitter(this)); } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -34,6 +34,7 @@ import java.nio.ByteBuffer; import java.util.Arrays; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.Properties; import java.util.UUID; @@ -2483,10 +2484,33 @@ } public void delete(long addr) { - // void - + delegate.delete(addr); } + public IRootBlockView getRootBlock(long commitTime) { + return delegate.getRootBlock(commitTime); + } + + public Iterator<IRootBlockView> getRootBlocks(long startTime) { + return delegate.getRootBlocks(startTime); + } + + public void delete(long addr, IAllocationContext context) { + delegate.delete(addr, context); + } + + public long write(ByteBuffer data, IAllocationContext context) { + return delegate.write(data, context); + } + + public long write(ByteBuffer data, long oldAddr, IAllocationContext context) { + return delegate.write(data, oldAddr, context); + } + + public void detachContext(IAllocationContext context) { + delegate.detachContext(context); + } + } /** @@ -2861,6 +2885,30 @@ } + public IRootBlockView getRootBlock(long commitTime) { + return delegate.getRootBlock(commitTime); + } + + public Iterator<IRootBlockView> getRootBlocks(long startTime) { + return delegate.getRootBlocks(startTime); + } + + public void delete(long addr, IAllocationContext context) { + throw new UnsupportedOperationException(); + } + + public long write(ByteBuffer data, IAllocationContext context) { + throw new UnsupportedOperationException(); + } + + public long write(ByteBuffer data, long oldAddr, IAllocationContext context) { + throw new UnsupportedOperationException(); + } + + public void detachContext(IAllocationContext context) { + delegate.detachContext(context); + } + } /** Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/CommitRecordIndex.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/CommitRecordIndex.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/CommitRecordIndex.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -26,6 +26,8 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +import java.util.Iterator; +import java.util.NoSuchElementException; import java.util.UUID; import com.bigdata.btree.BTree; @@ -696,4 +698,34 @@ } // CommitRecordIndexTupleSerializer + public Iterator<ICommitRecord> getCommitRecords(final long fromTime, final long toTime) { + return new 
Iterator<ICommitRecord>() { + ICommitRecord m_next = findNext(fromTime); + + public boolean hasNext() { + return m_next != null; + } + + public ICommitRecord next() { + if (m_next == null) { + throw new NoSuchElementException(); + } + + ICommitRecord ret = m_next; + m_next = findNext(ret.getTimestamp()); + + if (m_next != null && m_next.getTimestamp() > toTime) { + m_next = null; + } + + return ret; + } + + public void remove() { + throw new RuntimeException("Invalid Operation"); + } + + }; + } + } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -2559,5 +2559,9 @@ public void setNextOffset(long lastOffset) { // void for standard Disk strategy } + + public void setCommitRecordIndex(CommitRecordIndex commitRecordIndex) { + // NOP + } } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/FileMetadata.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/FileMetadata.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/FileMetadata.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -1245,8 +1245,8 @@ } - if (log.isDebugEnabled()) - log.debug("Writing ROOTBLOCK with commitCounter: " + rootBlock.getCommitCounter() + if (log.isTraceEnabled()) + log.trace("Writing ROOTBLOCK with commitCounter: " + rootBlock.getCommitCounter() + ", commitRecordIndexAddr: " + rootBlock.getCommitRecordIndexAddr() + ", commitRecordAddr: " + rootBlock.getCommitRecordAddr()); } Added: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAllocationContext.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAllocationContext.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAllocationContext.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -0,0 +1,41 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.journal; + +/** + * An IAllocationContext defines a shadow environment which may be + * associated with allocations made during a transaction. 
+ * + * @author Martyn Cutcher + * + */ +public interface IAllocationContext extends Comparable { + + /** + * @return the minimum release time for any freed allocations + */ + long minimumReleaseTime(); + +} Property changes on: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAllocationContext.java ___________________________________________________________________ Added: svn:mime-type + text/plain Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAtomicStore.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAtomicStore.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IAtomicStore.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -27,6 +27,8 @@ package com.bigdata.journal; +import java.util.Iterator; + import com.bigdata.rawstore.IRawStore; /** @@ -111,4 +113,24 @@ */ public ICommitRecord getCommitRecord(long timestamp); + /** + * Return the root block view associated with the commitRecord for the + * provided commit time. This requires accessing the next commit record + * since it is the previous root block that is referenced from each record. + * + * @param commitTime + * A commit time. + * + * @return The root block view -or- <code>null</code> if there is no commit + * record for that commitTime. + */ + public IRootBlockView getRootBlock(final long commitTime); + + /** + * + * @param startTime from which to begin iteration + * + * @return an iterator over the committed root blocks + */ + public Iterator<IRootBlockView> getRootBlocks(final long startTime); } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -270,4 +270,17 @@ */ public void setTransactionManager(AbstractLocalTransactionManager localTransactionManager); + + /** + * Needed to enable transaction support for standalone buffer strategies. + * + * The WORMStrategy does not need this since no data is ever deleted, but + * the RWStrategy must manage deletions and needs access to the historical + * commitRecords which reference the blocks of deferred deleted addresses. + * + * @param commitRecordIndex + * The CommitRecordIndex for the owning Journal + */ + public void setCommitRecordIndex(CommitRecordIndex commitRecordIndex); + } Added: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -0,0 +1,245 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.journal; + +import java.io.File; +import java.nio.ByteBuffer; +import java.util.Iterator; +import java.util.Properties; +import java.util.UUID; +import java.util.concurrent.ExecutorService; + +import com.bigdata.bfs.BigdataFileSystem; +import com.bigdata.btree.BTree; +import com.bigdata.btree.IIndex; +import com.bigdata.btree.IndexMetadata; +import com.bigdata.counters.CounterSet; +import com.bigdata.mdi.IResourceMetadata; +import com.bigdata.relation.locator.IResourceLocator; +import com.bigdata.sparse.SparseRowStore; + +public class JournalDelegate implements IJournal { + final IJournal delegate; + + public JournalDelegate(final IJournal source) { + this.delegate = source; + } + + public Properties getProperties() { + return delegate.getProperties(); + } + + public void shutdown() { + delegate.shutdown(); + } + + public void shutdownNow() { + delegate.shutdownNow(); + } + + public void abort() { + delegate.abort(); + } + + public long commit() { + return delegate.commit(); + } + + public ICommitRecord getCommitRecord(long timestamp) { + return delegate.getCommitRecord(timestamp); + } + + public long getRootAddr(int index) { + return delegate.getRootAddr(index); + } + + public IRootBlockView getRootBlock(long commitTime) { + return delegate.getRootBlock(commitTime); + } + + public IRootBlockView getRootBlockView() { + return delegate.getRootBlockView(); + } + + public Iterator<IRootBlockView> getRootBlocks(long startTime) { + return delegate.getRootBlocks(startTime); + } + + public void setCommitter(int index, ICommitter committer) { + delegate.setCommitter(index, committer); + } + + public void close() { + delegate.close(); + } + + public void delete(long addr) { + delegate.delete(addr); + } + + public void deleteResources() { + delegate.deleteResources(); + } + + public void destroy() { + delegate.destroy(); + } + + public void force(boolean metadata) { + delegate.force(metadata); + } + + public CounterSet getCounters() { + return delegate.getCounters(); + } + + public File getFile() { + return delegate.getFile(); + } + + public IResourceMetadata getResourceMetadata() { + return delegate.getResourceMetadata(); + } + + public UUID getUUID() { + return delegate.getUUID(); + } + + public boolean isFullyBuffered() { + return delegate.isFullyBuffered(); + } + + public boolean isOpen() { + return delegate.isOpen(); + } + + public boolean isReadOnly() { + return delegate.isOpen(); + } + + public boolean isStable() { + return delegate.isStable(); + } + + public ByteBuffer read(long addr) { + return delegate.read(addr); + } + + public long size() { + return delegate.size(); + } + + public long write(ByteBuffer data) { + return delegate.write(data); + } + + public long write(ByteBuffer data, long oldAddr) { + return delegate.write(data, oldAddr); + } + + public int getByteCount(long addr) { + return delegate.getByteCount(addr); + } + + public long getOffset(long addr) { + return delegate.getOffset(addr); + } + + public long toAddr(int nbytes, long offset) { + return delegate.toAddr(nbytes, offset); + } + + public String toString(long 
addr) { + return delegate.toString(addr); + } + + public IIndex getIndex(String name) { + return delegate.getIndex(name); + } + + public IIndex registerIndex(String name, BTree btree) { + return delegate.registerIndex(name, btree); + } + + public IIndex registerIndex(String name, IndexMetadata indexMetadata) { + return delegate.registerIndex(name, indexMetadata); + } + + public void dropIndex(String name) { + delegate.dropIndex(name); + } + + public void registerIndex(IndexMetadata indexMetadata) { + delegate.registerIndex(indexMetadata); + } + + public ExecutorService getExecutorService() { + return delegate.getExecutorService(); + } + + public BigdataFileSystem getGlobalFileSystem() { + return delegate.getGlobalFileSystem(); + } + + public SparseRowStore getGlobalRowStore() { + return delegate.getGlobalRowStore(); + } + + public IIndex getIndex(String name, long timestamp) { + return delegate.getIndex(name, timestamp); + } + + public long getLastCommitTime() { + return delegate.getLastCommitTime(); + } + + public IResourceLocator getResourceLocator() { + return delegate.getResourceLocator(); + } + + public IResourceLockService getResourceLockService() { + return delegate.getResourceLockService(); + } + + public TemporaryStore getTempStore() { + return delegate.getTempStore(); + } + + public void delete(long addr, IAllocationContext context) { + delegate.delete(addr, context); + } + + public long write(ByteBuffer data, IAllocationContext context) { + return delegate.write(data, context); + } + + public long write(ByteBuffer data, long oldAddr, IAllocationContext context) { + return delegate.write(data, oldAddr, context); + } + + public void detachContext(IAllocationContext context) { + delegate.detachContext(context); + } +} Property changes on: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java ___________________________________________________________________ Added: svn:mime-type + text/plain Added: svn:keywords + Id Date Revision Author HeadURL Added: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalShadow.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalShadow.java (rev 0) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalShadow.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -0,0 +1,85 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.journal; + +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicLong; + +/** + * A JournalShadow wraps a Journal as a JournalDelegate but provides itself + * as the allocation context to be passed through to any interested + * BufferStrategy. 
+ *
+ * This is the path by which the RWStore allocators are provided with the
+ * context for the allocations and deletes that are made.
+ *
+ * @author Martyn Cutcher
+ *
+ */
+public class JournalShadow extends JournalDelegate implements IAllocationContext {
+	static AtomicLong s_idCounter = new AtomicLong(23);
+	int m_id = (int) s_idCounter.incrementAndGet();
+
+	public JournalShadow(IJournal source) {
+		super(source);
+	}
+
+	public long write(ByteBuffer data) {
+		return delegate.write(data, this);
+	}
+
+	public long write(ByteBuffer data, long oldAddr) {
+		return delegate.write(data, oldAddr, this);
+	}
+
+	public void delete(long oldAddr) {
+		delegate.delete(oldAddr, this);
+	}
+
+	public int compareTo(Object o) {
+		if (o instanceof JournalShadow) {
+			JournalShadow js = (JournalShadow) o;
+			return m_id - js.m_id;
+		} else {
+			return -1;
+		}
+	}
+
+	/**
+	 * TODO: should retrieve from localTransactionService or Journal
+	 * properties
+	 */
+	public long minimumReleaseTime() {
+		return 0;
+	}
+
+	/**
+	 * Releases this shadow from the wrapped Journal; this unlocks the
+	 * associated allocators for the RWStore.
+	 */
+	public void detach() {
+		delegate.detachContext(this);
+	}
+}

Property changes on: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalShadow.java
___________________________________________________________________
Added: svn:mime-type
   + text/plain
Added: svn:keywords
   + Id Date Revision Author HeadURL

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalTransactionService.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalTransactionService.java	2010-09-01 13:35:08 UTC (rev 3476)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/JournalTransactionService.java	2010-09-01 14:19:05 UTC (rev 3477)
@@ -33,6 +33,7 @@
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 
 import com.bigdata.service.AbstractFederation;
 import com.bigdata.service.AbstractTransactionService;
@@ -514,4 +515,25 @@
 
     }
 
+    /**
+     * Invoke a method with the {@link AbstractTransactionService}'s lock
+     * held, but throw an immediate exception if the lock cannot be acquired
+     * within the given timeout.
+     *
+     * @param <T>
+     *            The generic type of the operation's result.
+     * @param callable
+     *            The operation to invoke while the lock is held.
+     * @return The result of the operation.
+     * @throws Exception
+     *             if the lock could not be acquired within the timeout or if
+     *             the operation fails.
+     */
+    public <T> T tryCallWithLock(final Callable<T> callable, long waitFor, TimeUnit unit) throws Exception {
+        if (!lock.tryLock(waitFor, unit)) {
+            throw new RuntimeException("Lock not available");
+        }
+        try {
+            return callable.call();
+        } finally {
+            lock.unlock();
+        }
+    }
+
 }

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java	2010-09-01 13:35:08 UTC (rev 3476)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java	2010-09-01 14:19:05 UTC (rev 3477)
@@ -81,6 +81,8 @@
 
 	final private FileMetadataView m_fmv = new FileMetadataView();
 
+	private volatile boolean m_open = false;
+
 	private volatile IRootBlockView m_rb;
 	private volatile IRootBlockView m_rb0;
 	private volatile IRootBlockView m_rb1;
@@ -110,6 +112,7 @@
 		m_rb = fileMetadata.rootBlock;
 
 		m_store = new RWStore(m_fmv, false, quorum); // not read-only for now
+		m_open = true;
 
 		m_rb0 = copyRootBlock(true);
 		m_rb1 = copyRootBlock(false);
@@ -233,6 +236,10 @@
 	}
 
 	public long write(ByteBuffer data) {
+		return write(data, null);
+	}
+
+	public long write(ByteBuffer data, IAllocationContext context) {
 		checkReopen();
 
 		if (data == null) {
@@ -246,7 +253,7 @@
 		}
 
 		try {
-			long rwaddr = m_store.alloc(data.array(), nbytes);
+			long rwaddr = m_store.alloc(data.array(), nbytes, context);
 			data.position(nbytes); // update position to end of buffer
 
 			long retaddr = encodeAddr(rwaddr, nbytes);
@@ -296,30 +303,27 @@
 		return (int) (addr & 0xFFFFFFFF);
 	}
 
+	public void delete(long addr) {
+		delete(addr, null);
+	}
+
 	/**
 	 * Must check whether there are existing transactions which may access
 	 * this data, and if not free immediately, otherwise defer.
 	 */
-	public void delete(long addr) {
-		final JournalTransactionService service = (JournalTransactionService) (localTransactionManager == null ?
null - : localTransactionManager.getTransactionService()); + public void delete(long addr, IAllocationContext context) { final int rwaddr = decodeAddr(addr); final int sze = decodeSize(addr); - // FIXME: need to decide on correct way to handle transaction oriented - // allocations - if (true || service == null) { - m_store.free(rwaddr, sze); - } else { - /* - * May well be better to always defer and then free in batch, - * but for now need to confirm transaction logic - */ - m_store.deferFree(rwaddr, sze, m_rb.getLastCommitTime()); - } + m_store.free(rwaddr, sze, context); } + + public void detachContext(IAllocationContext context) { + m_store.detachContext(context); + } + public static class RWAddressManager implements IAddressManager { public int getByteCount(long addr) { @@ -428,8 +432,8 @@ try { m_store.checkRootBlock(rootBlock); - if (log.isInfoEnabled()) { - log.info("Writing new rootblock with commitCounter: " + if (log.isTraceEnabled()) { + log.trace("Writing new rootblock with commitCounter: " + rootBlock.getCommitCounter() + ", commitRecordAddr: " + rootBlock.getCommitRecordAddr() + ", commitRecordIndexAddr: " + rootBlock.getCommitRecordIndexAddr()); @@ -461,6 +465,8 @@ throw new IllegalStateException(); } try { + m_open = false; + m_store.close(); m_fileMetadata.raf.close(); m_fileMetadata.raf = null; @@ -563,6 +569,7 @@ m_fileMetadata.raf = new RandomAccessFile(m_fileMetadata.file, m_fileMetadata.fileMode); m_store = new RWStore(m_fmv, false, m_environment); // never read-only for now m_needsReopen = false; + m_open = true; } catch (Throwable t) { t.printStackTrace(); @@ -593,7 +600,8 @@ } public boolean isOpen() { - return m_fileMetadata.raf != null && m_fileMetadata.raf.getChannel().isOpen(); + // return m_fileMetadata.raf != null && m_fileMetadata.raf.getChannel().isOpen(); + return m_open; } public boolean isReadOnly() { @@ -714,4 +722,25 @@ m_store.setTransactionService((JournalTransactionService) localTransactionManager.getTransactionService()); } + public long getPhysicalAddress(long addr) { + int rwaddr = decodeAddr(addr); + + return m_store.physicalAddress(rwaddr); + } + + /** + * Saves the current list of delete blocks, returning the address allocated. + * This can be used later to retrieve the addresses of allocations to be + * freed. 
+	 *
+	 * @return the address of the delete blocks, or zero if none
+	 */
+	public long saveDeleteBlocks() {
+		return m_store.saveDeferrals();
+	}
+
+	public void setCommitRecordIndex(CommitRecordIndex commitRecordIndex) {
+		m_store.setCommitRecordIndex(commitRecordIndex);
+	}
+
 }

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java	2010-09-01 13:35:08 UTC (rev 3476)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java	2010-09-01 14:19:05 UTC (rev 3477)
@@ -2240,4 +2240,8 @@
 
     }
 
+    public void setCommitRecordIndex(CommitRecordIndex commitRecordIndex) {
+        // NOP
+    }
+
 }

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/AbstractRawStore.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/AbstractRawStore.java	2010-09-01 13:35:08 UTC (rev 3476)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/AbstractRawStore.java	2010-09-01 14:19:05 UTC (rev 3477)
@@ -30,6 +30,7 @@
 import java.nio.ByteBuffer;
 
 import com.bigdata.LRUNexus;
+import com.bigdata.journal.IAllocationContext;
 
 /**
  * Abstract base class for {@link IRawStore} implementations. This class uses a
@@ -76,4 +77,23 @@
     public void delete(long addr) {
        // NOP.
    }
+
+    public void delete(long addr, IAllocationContext context) {
+        delete(addr);
+    }
+
+    public long write(ByteBuffer data, IAllocationContext context) {
+        return write(data);
+    }
+
+    public long write(ByteBuffer data, long oldAddr, IAllocationContext context) {
+        return write(data, oldAddr);
+    }
+
+    /**
+     * The default implementation is a NOP.
+     */
+    public void detachContext(IAllocationContext context) {
+        // NOP
+    }
 }

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/IRawStore.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/IRawStore.java	2010-09-01 13:35:08 UTC (rev 3476)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rawstore/IRawStore.java	2010-09-01 14:19:05 UTC (rev 3477)
@@ -37,6 +37,7 @@
 import com.bigdata.counters.CounterSet;
 import com.bigdata.io.IByteArrayBuffer;
 import com.bigdata.journal.AbstractJournal;
+import com.bigdata.journal.IAllocationContext;
 import com.bigdata.mdi.IResourceMetadata;
 
 /**
@@ -124,6 +125,7 @@
     public long write(ByteBuffer data);
 
     /**
+     * Write the data (unisolated).
      *
      * @param data
      *            The data. The bytes from the current
@@ -133,6 +135,43 @@
      *            {@link ByteBuffer#limit()} . The caller may subsequently
      *            modify the contents of the buffer without changing the state
      *            of the store (i.e., the data are copied into the store).
+     *
+     * @param context defines the shadow AllocationContext from which this call
+     *            was made
+     *
+     * @return A long integer formed that encodes both the offset from which the
+     *         data may be read and the #of bytes to be read. See
+     *         {@link IAddressManager}.
+     *
+     * @throws IllegalArgumentException
+     *             if <i>data</i> is <code>null</code>.
+     * @throws IllegalArgumentException
+     *             if <i>data</i> has zero bytes {@link ByteBuffer#remaining()}.
+     * @throws IllegalStateException
+     *             if the store is not open.
+     * @throws IllegalStateException
+     *             if the store does not allow writes.
+     *
+     * @todo define exception if the maximum extent would be exceeded.
+     *
+     * @todo the addresses need to reflect the ascending offset at which the
+     *       data are written, at least for a class of append only store. Some
+     *       stores, such as the Journal, also have an offset from the start of
+     *       the file to the start of the data region (in the case of the
+     *       Journal it is used to hold the root blocks).
+     */
+    public long write(ByteBuffer data, IAllocationContext context);
+
+    /**
+     *
+     * @param data
+     *            The data. The bytes from the current
+     *            {@link ByteBuffer#position()} to the
+     *            {@link ByteBuffer#limit()} will be written and the
+     *            {@link ByteBuffer#position()} will be advanced to the
+     *            {@link ByteBuffer#limit()} . The caller may subsequently
+     *            modify the contents of the buffer without changing the state
+     *            of the store (i.e., the data are copied into the store).
+     * @param oldAddr as returned from a previous write of the same object, or zero if a new write
+     *
+     * @return A long integer formed that encodes both the offset from which the
+     *         data may be read and the #of bytes to be read. See
+     *         {@link IAddressManager}.
+     */
     public long write(ByteBuffer data, long oldAddr);
 
+    /**
+     *
+     * @param data
+     *            The data. The bytes from the current
+     *            {@link ByteBuffer#position()} to the
+     *            {@link ByteBuffer#limit()} will be written and the
+     *            {@link ByteBuffer#position()} will be advanced to the
+     *            {@link ByteBuffer#limit()} . The caller may subsequently
+     *            modify the contents of the buffer without changing the state
+     *            of the store (i.e., the data are copied into the store).
+     * @param oldAddr as returned from a previous write of the same object, or zero if a new write
+     * @param context defines the shadow AllocationContext from which this call is made
+     *
+     * @return A long integer formed that encodes both the offset from which the
+     *         data may be read and the #of bytes to be read. See
+     *         {@link IAddressManager}.
+     */
+    public long write(ByteBuffer data, long oldAddr, IAllocationContext context);
+
     /**
      * Delete the data (unisolated).
      * <p>
@@ -168,6 +226,48 @@
     public void delete(long addr);
 
     /**
+     * Delete the data (unisolated).
+     * <p>
+     * After this operation subsequent reads on the address MAY fail and the
+     * caller MUST NOT depend on the ability to read at that address.
+     *
+     * @param addr
+     *            A long integer formed using {@link Addr} that encodes both the
+     *            offset at which the data was written and the #of bytes that
+     *            were written.
+     *
+     * @param context
+     *            Defines the shadow AllocationContext from which this call is
+     *            made. For RWStore this can be used to immediately free the
+     *            allocation if it can be determined to have originally been
+     *            requested from the same context.
+     *
+     * @exception IllegalArgumentException
+     *                If the address is known to be invalid (never written or
+     *                deleted). Note that the address 0L is always invalid.
+     *
+     * Delete is only applicable in the
+     * context of a garbage collection strategy. With an append only
+     * store and with eviction of btrees into index segments there
+     * is no reason to delete anything on the store - and nothing to
+     * keep track of the delete.
+     *
+     * However, with a Read-Write store it is a requirement, and a void
+     * implementation is provided for other stores.
+     */
+    public void delete(long addr, IAllocationContext context);
+
+    /**
+     *
+     * @param context
+     *            Defines the shadow AllocationContext that may have been used
+     *            to allocate or delete storage. The RWStore assigns
+     *            Allocation areas to specific contexts and these must be
+     *            released for use by others.
+ */ + public void detachContext(IAllocationContext context); + + /** * Read the data (unisolated). * * @param addr Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -43,8 +43,10 @@ import com.bigdata.journal.ConcurrencyManager; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.IIndexStore; +import com.bigdata.journal.IJournal; import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; +import com.bigdata.journal.JournalShadow; import com.bigdata.journal.TimestampUtility; import com.bigdata.relation.IMutableRelation; import com.bigdata.relation.accesspath.ChunkConsumerIterator; @@ -503,6 +505,11 @@ * the mutation task will read. */ tx = jnl.newTx(lastCommitTime); + + /* + * Create the shadow journal to define the allocation context + */ + indexManager = new JournalShadow(jnl); // the timestamp that we will read on for this step. joinNexusFactory.setReadTimestamp(TimestampUtility Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -20,13 +20,14 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ + */ package com.bigdata.rwstore; import java.util.ArrayList; import com.bigdata.io.writecache.WriteCacheService; +import com.bigdata.journal.IAllocationContext; /** * Bit maps for an allocator. The allocator is a bit map managed as int[]s. @@ -58,6 +59,11 @@ */ int m_commit[]; /** + * If used as a shadow allocator, then the _commit is saved to m_saveCommit + * and m_transients is copied to m_commit. + */ + int m_saveCommit[]; + /** * Just the newly allocated bits. This will be copied onto {@link #m_commit} * when the current native transaction commits. 
*/ @@ -73,113 +79,152 @@ */ private final RWWriteCacheService m_writeCache; - AllocBlock(final int addrIsUnused, final int bitSize, final RWWriteCacheService cache) { - m_writeCache = cache; - m_ints = bitSize; - m_commit = new int[bitSize]; - m_bits = new int[bitSize]; - m_transients = new int[bitSize]; - } + AllocBlock(final int addrIsUnused, final int bitSize, final RWWriteCacheService cache) { + m_writeCache = cache; + m_ints = bitSize; + m_commit = new int[bitSize]; + m_bits = new int[bitSize]; + m_transients = new int[bitSize]; + } - public boolean verify(final int addr, final int size) { - if (addr < m_addr || addr >= (m_addr + (size * 32 * m_ints))) { - return false; - } + public boolean verify(final int addr, final int size) { + if (addr < m_addr || addr >= (m_addr + (size * 32 * m_ints))) { + return false; + } // Now check to see if it allocated - final int bit = (addr - m_addr) / size; + final int bit = (addr - m_addr) / size; - return RWStore.tstBit(m_bits, bit); - } + return RWStore.tstBit(m_bits, bit); + } - public boolean addressInRange(final int addr, final int size) { - return (addr >= m_addr && addr <= (m_addr + (size * 32 * m_ints))); - } - - public boolean free(final int addr, final int size) { - if (addr < m_addr || addr >= (m_addr + (size * 32 * m_ints))) { - return false; - } + public boolean addressInRange(final int addr, final int size) { + return (addr >= m_addr && addr <= (m_addr + (size * 32 * m_ints))); + } - freeBit((addr - m_addr) / size, addr); + public boolean free(final int addr, final int size) { + if (addr < m_addr || addr >= (m_addr + (size * 32 * m_ints))) { + return false; + } - return true; - } + freeBit((addr - m_addr) / size); - public boolean freeBit(final int bit, final long addr) { - // Allocation optimization - if bit NOT set in committed memory then clear - // the transient bit to permit reallocation within this transaction. - // - // Note that with buffered IO there is also an opportunity to avoid output to - // the file by removing any pending write to the now freed address. On large - // transaction scopes this may be significant. - RWStore.clrBit(m_bits, bit); - - if (!RWStore.tstBit(m_commit, bit)) { - // Should not be cleared here! - // m_writeCache.clearWrite(addr); + return true; + } - RWStore.clrBit(m_transients, bit); - - return true; - } else { - return false; - } - } + public boolean freeBit(final int bit) { + if (!RWStore.tstBit(m_bits, bit)) { + throw new IllegalArgumentException("Freeing bit not set"); + } + + // Allocation optimization - if bit NOT set in committed memory then + // clear + // the transient bit to permit reallocation within this transaction. + // + // Note that with buffered IO there is also an opportunity to avoid + // output to + // the file by removing any pending write to the now freed address. On + // large + // transaction scopes this may be significant. + RWStore.clrBit(m_bits, bit); - public int alloc(final int size) { - if (size < 0) { - throw new Error("Storage allocation error : negative size passed"); - } + if (!RWStore.tstBit(m_commit, bit)) { + RWStore.clrBit(m_transients, bit); - final int bit = RWStore.fndBit(m_transients, m_ints); + return true; + } else { + return false; + } + } - if (bit != -1) { - RWStore.setBit(m_bits, bit); - RWStore.setBit(m_transients, bit); + /** + * The shadow, if non-null defines the context for this request. + * + * If an existing shadow is registered, then the allocation fails + * immediately. 
+ * + * If no existing shadow is registered, and a new allocation can be made + * then this AllocBlock is registered with the shadow. + * + * Note that when shadows are used, an allocator on a free list may not have + * allocations available for all contexts, so the assumption that presence + * on the free list implies availability is not assertable. + */ - return bit; - } else { - return -1; - } - } + public int alloc(final int size) { + if (size < 0) { + throw new Error("Storage allocation error : negative size passed"); + } - public boolean hasFree() { - for (int i = 0; i < m_ints; i++) { - if (m_bits[i] != 0xFFFFFFFF) { - return true; - } - } + final int bit = RWStore.fndBit(m_transients, m_ints); - return false; - } + if (bit != -1) { + RWStore.setBit(m_bits, bit); + RWStore.setBit(m_transients, bit); + return bit; + } else { + return -1; + } + } + + public boolean hasFree() { + for (int i = 0; i < m_ints; i++) { + if (m_bits[i] != 0xFFFFFFFF) { + return true; + } + } + + return false; + } + public int getAllocBits() { - int total = m_ints * 32; - int allocBits = 0; - for (int i = 0; i < total; i++) { - if (RWStore.tstBit(m_bits, i)) { - allocBits++; - } - } - - return allocBits; + int total = m_ints * 32; + int allocBits = 0; + for (int i = 0; i < total; i++) { + if (RWStore.tstBit(m_bits, i)) { + allocBits++; + } + } + + return allocBits; } - public String getStats() { - final int total = m_ints * 32; - final int allocBits = getAllocBits(); + public String getStats() { + final int total = m_ints * 32; + final int allocBits = getAllocBits(); - return "Addr : " + m_addr + " [" + allocBits + "::" + total + "]"; - } + return "Addr : " + m_addr + " [" + allocBits + "::" + total + "]"; + } - public void addAddresses(final ArrayList addrs, final int rootAddr) { - final int total = m_ints * 32; - - for (int i = 0; i < total; i++) { - if (RWStore.tstBit(m_bits, i)) { - addrs.add(new Integer(rootAddr - i)); - } - } - } + public void addAddresses(final ArrayList addrs, final int rootAddr) { + final int total = m_ints * 32; + + for (int i = 0; i < total; i++) { + if (RWStore.tstBit(m_bits, i)) { + addrs.add(new Integer(rootAddr - i)); + } + } + } + + /** + * Store m_commit bits in m_saveCommit then duplicate transients to m_commit. + * + * This ensures, that while shadowed, the allocator will not re-use storage + * that was allocated prior to the shadow creation. + */ + public void shadow() { + m_saveCommit = m_commit; + m_commit = m_transients.clone(); + } + + /** + * The transient bits will have been added to correctly, we now just need to + * restore the commit bits from the m_saveCommit, to allow re-allocation + * of non-committed storage. 
+ */ + public void deshadow() { + m_commit = m_saveCommit; + m_saveCommit = null; + } } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -28,6 +28,8 @@ import java.util.ArrayList; import java.util.concurrent.atomic.AtomicLong; +import com.bigdata.journal.IAllocationContext; + public interface Allocator extends Comparable { public int getBlockSize(); public void setIndex(int index); @@ -35,7 +37,7 @@ public long getStartAddr(); public boolean addressInRange(int addr); public boolean free(int addr, int size); - public int alloc(RWStore store, int size); + public int alloc(RWStore store, int size, IAllocationContext context); public int getDiskAddr(); public void setDiskAddr(int addr); public long getPhysicalAddress(int offset); @@ -46,10 +48,10 @@ public boolean hasFree(); public void setFreeList(ArrayList list); public String getStats(AtomicLong counter); - public void preserveSessionData(); public void addAddresses(ArrayList addrs); public int getRawStartAddr(); public int getIndex(); public void appendShortStats(StringBuffer str); + public boolean canImmediatelyFree(int addr, int size, IAllocationContext context); } \ No newline at end of file Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -7,6 +7,7 @@ import java.util.ArrayList; import java.util.concurrent.atomic.AtomicLong; +import com.bigdata.journal.IAllocationContext; import com.bigdata.util.ChecksumUtility; /** @@ -35,6 +36,8 @@ public BlobAllocator(RWStore store, int sortAddr) { m_store = store; m_sortAddr = sortAddr; + + System.out.println("New BlobAllocator"); } public void addAddresses(ArrayList addrs) { @@ -46,7 +49,7 @@ return false; } - public int alloc(RWStore store, int size) { + public int alloc(RWStore store, int size, IAllocationContext context) { assert size > m_store.m_maxFixedAlloc; return 0; @@ -92,7 +95,41 @@ return false; } + + public int getFirstFixedForBlob(int addr, int sze) { + if (sze < m_store.m_maxFixedAlloc) + throw new IllegalArgumentException("Unexpected address size"); + int alloc = m_store.m_maxFixedAlloc-4; + int blcks = (alloc - 1 + sze)/alloc; + + int hdr_idx = (-addr) & RWStore.OFFSET_BITS_MASK; + if (hdr_idx > m_hdrs.length) + throw new IllegalArgumentException("free BlobAllocation problem, hdr offset: " + hdr_idx + ", avail:" + m_hdrs.length); + + int hdr_addr = m_hdrs[hdr_idx]; + + if (hdr_addr == 0) { + throw new IllegalArgumentException("getFirstFixedForBlob called with unallocated address"); + } + + // read in header block, then free each reference + byte[] hdr = new byte[(blcks+1) * 4 + 4]; // add space for checksum + m_store.getData(hdr_addr, hdr); + + try { + DataInputStream instr = new DataInputStream( + new ByteArrayInputStream(hdr, 0, hdr.length-4) ); + int nallocs = instr.readInt(); + int faddr = instr.readInt(); + + return faddr; + + } catch (IOException ioe) { + throw new 
RuntimeException("Unable to retrieve first fixed address", ioe); + } + } + public int getBlockSize() { // Not relevant for Blobs return 0; @@ -269,4 +306,22 @@ return m_hdrs[offset] != 0; } + /** + * This is okay as a NOP. The true allocation is managed by the + * FixedAllocators. + */ + public void detachContext(IAllocationContext context) { + // NOP + } + + /** + * Since the real allocation is in the FixedAllocators, this should delegate + * to the first address, in which case + */ + public boolean canImmediatelyFree(int addr, int size, IAllocationContext context) { + int faddr = this.getFirstFixedForBlob(addr, size); + + return m_store.getBlockByAddress(faddr).canImmediatelyFree(faddr, 0, context); + } + } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2010-09-01 13:35:08 UTC (rev 3476) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2010-09-01 14:19:05 UTC (rev 3477) @@ -30,6 +30,7 @@ import org.apache.log4j.Logger; +import com.bigdata.journal.IAllocationContext; import com.bigdata.util.ChecksumUtility; /** @@ -47,8 +48,6 @@ private int m_diskAddr; int m_index; - protected boolean m_preserveSession = false; - public void setIndex(int index) { AllocBlock fb = (AllocBlock) m_allocBlocks.get(0); if (log.isDebugEnabled()) @@ -57,10 +56,6 @@ m_index = index; } - public void preserveSessionData() { - m_preserveSession = true; - } - public long getStartAddr() { return RWStore.convertAddr(m_startAddr); } @@ -125,9 +120,31 @@ } } + volatile private IAllocationContext m_context; + public void setAllocationContext(IAllocationContext context) { + if (context == null && m_context != null) { + // restore commit bits in AllocBlocks + for (AllocBlock allocBlock : m_allocBlocks) { + allocBlock.deshadow(); + } + } else if (context != null & m_context == null) { + // restore commit bits in AllocBlocks + for (AllocBlock allocBlock : m_allocBlocks) { + allocBlock.shadow(); + } + } + m_context = context; + } + + /** + * write called on commit, so this is the point when "transient frees" - the + * freeing of previously committed memory can be made available since we + * are creating a new commit point - the condition being that m_freeBits + * was zero and m_freeTransients not. 
+ */ public byte[] write() { try { - final AllocBlock fb = (AllocBlock) m_allocBlocks.get(0); + final AllocBlock fb = m_allocBlocks.get(0); if (log.isDebugEnabled()) log.debug("writing allocator " + m_index + " for " + getStartAddr() + " with " + fb.m_bits[0]); final byte[] buf = new byte[1024]; @@ -144,17 +161,29 @@ str.writeInt(block.m_bits[i]); } - if (!m_preserveSession) { - block.m_transients = (int[]) block.m_bits.clone(); + if (!m_store.isSessionPreserved()) { + block.m_transients = block.m_bits.clone(); } - block.m_commit = (int[]) block.m_bits.clone(); + /** + * If this allocator is shadowed then copy the new + * committed state to m_saveCommit + */ + if (m_context != null) { + assert block.m_saveCommit != null; + + block.m_saveCommit = block.m_bits.clone(); + } else if (m_store.isSessionPreserved()) { + block.m_commit = block.m_transients.clone(); + } else { + block.m_commit = block.m_bits.clone(); + } } // add checksum final int chk = ChecksumUtility.getCHK().checksum(buf, str.size()); str.writeInt(chk); - if (!m_preserveSession) { + if (!m_store.isSessionPreserved()) { m_freeBits += m_freeTransients; // Handle re-addition to free list once transient frees are @@ -234,6 +263,8 @@ private final ArrayList<AllocBlock> m_allocBlocks; + private RWStore m_store; + /** * Calculating the number of ints (m_bitSize) cannot rely on a power of 2. Previously this * assumption was sufficient to guarantee a rounding on to an 64k boundary. However, now @@ -248,8 +279,9 @@ * @param preserveSessionData * @param cache */ - FixedAllocator(final int size, final boolean preserveSessionData, final RWWriteCacheService cache) { + FixedAllocator(final RWStore store, final int size, final RWWriteCacheService cache) { m_diskAddr = 0; + m_store = store; m_size = size; @@ -289,8 +321,6 @@ m_freeTransients = 0; m_freeBits = 32 * m_bitSize * numBlocks; - - m_preserveSession = preserveSessionData; } public String getStats(final AtomicLong counter) { @@ -351,7 +381,7 @@ final int block = offset/nbits; if (((AllocBlock) m_allocBlocks.get(block)) - .freeBit(offset % nbits, getPhysicalAddress(offset + 3))) { // bit adjust + .freeBit(offset % n... [truncated message content] |
From: <tho...@us...> - 2010-09-01 13:35:28
Revision: 3476 http://bigdata.svn.sourceforge.net/bigdata/?rev=3476&view=rev Author: thompsonbry Date: 2010-09-01 13:35:08 +0000 (Wed, 01 Sep 2010) Log Message: ----------- Corrected compile time error introduced into the lubm Test.java class when it was modified to use NicUtil. Modified Paths: -------------- trunk/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java Modified: trunk/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java =================================================================== --- trunk/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java 2010-08-31 21:03:30 UTC (rev 3475) +++ trunk/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java 2010-09-01 13:35:08 UTC (rev 3476) @@ -85,7 +85,7 @@ hostname = NicUtil.getIpAddress("default.nic", "default", false); } catch(Throwable t) {//for now, maintain same failure logic as used previously t.printStackTrace(); - s = NicUtil.getIpAddressByLocalHost(); + hostname = NicUtil.getIpAddressByLocalHost(); } QUERY_TEST_RESULT_FILE = hostname + "-result.txt"; } else { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
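The one-line fix above is mechanical: the fallback branch still assigned to a variable named s that no longer exists, so the class did not compile. The shape of the code it repairs, a primary lookup with a catch-all fallback, can be sketched with the standard library alone; InetAddress below is only a stand-in for the project's NicUtil, whose lookup semantics differ, and the file-name suffix follows the patch context.

import java.net.InetAddress;

public class HostnameFallbackDemo {

    /** Resolve a hostname, falling back to a loopback name on any failure. */
    static String resolveHostname() {
        String hostname;
        try {
            // primary path; NicUtil.getIpAddress("default.nic", "default", false) plays this role in the real code
            hostname = InetAddress.getLocalHost().getHostAddress();
        } catch (Throwable t) {
            // same failure logic as the patch: report the problem, then fall back
            t.printStackTrace();
            hostname = "localhost";
        }
        return hostname;
    }

    public static void main(String[] args) {
        System.out.println(resolveHostname() + "-result.txt");
    }
}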
From: <sgo...@us...> - 2010-08-31 21:03:36
Revision: 3475 http://bigdata.svn.sourceforge.net/bigdata/?rev=3475&view=rev Author: sgossard Date: 2010-08-31 21:03:30 +0000 (Tue, 31 Aug 2010) Log Message: ----------- [maven_scaleout] : Cleanup dependency version numbers that were overly restrictive to downstream projects. Modified Paths: -------------- branches/maven_scaleout/pom.xml Modified: branches/maven_scaleout/pom.xml =================================================================== --- branches/maven_scaleout/pom.xml 2010-08-31 20:15:29 UTC (rev 3474) +++ branches/maven_scaleout/pom.xml 2010-08-31 21:03:30 UTC (rev 3475) @@ -12,30 +12,7 @@ <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> - - <test.codebase.port>23333</test.codebase.port> - - <!-- Non-public bigdata dependencies. Used for install and deploy phases. --> - <thirdParty.groupId>com.bigdata.thirdparty</thirdParty.groupId> - <!-- TODO: look at maven-bundle-plugin from felix to provide osgi support. bndlib version 0.0.357 in central. --> - <cweb-extser.artifactId>cweb-extser</cweb-extser.artifactId> - <cweb-extser.version>0.1.0-b2-dev</cweb-extser.version> - <unimi-fastutil.artifactId>unimi-fastutil</unimi-fastutil.artifactId> - <unimi-fastutil.version>5.1.5</unimi-fastutil.version> - <lgpl-utils.artifactId>lgpl-utils</lgpl-utils.artifactId> - <lgpl-utils.version>1.0.6</lgpl-utils.version> - <ctc-utils.artifactId>ctc-utils</ctc-utils.artifactId> - <ctc-utils.version>5-4-2005</ctc-utils.version> - <dsi-utils.artifactId>dsi-utils</dsi-utils.artifactId> - <dsi-utils.version>1.0.6-020610</dsi-utils.version> - <colt.artifactId>colt</colt.artifactId> - <colt.version>1.2.0</colt.version> - <high-scale.artifactId>high-scale-lib</high-scale.artifactId> - <high-scale.version>1.1.2</high-scale.version> - <iris.artifactId>iris</iris.artifactId> - <iris.version>0.58</iris.version> - <nxparser.artifactId>nxparser</nxparser.artifactId> - <nxparser.version>6-22-2010</nxparser.version> + <thirdParty.groupId>com.bigdata.thirdparty</thirdParty.groupId><!-- group ID for non-public bigdata dependencies. --> </properties> @@ -232,10 +209,13 @@ <!-- ************************ Start of non-public dependencies ************************ --> <!-- ************************ Start of non-public dependencies ************************ --> <!-- ************************ Start of non-public dependencies ************************ --> + + <!-- TODO: look at maven-bundle-plugin from felix to provide osgi support. bndlib version 0.0.357 in central. --> + <dependency> <groupId>${thirdParty.groupId}</groupId> - <artifactId>${cweb-extser.artifactId}</artifactId> - <version>[${cweb-extser.version}]</version> + <artifactId>cweb-extser</artifactId> + <version>0.1.0-b2-dev</version> </dependency> <dependency> <groupId>${thirdParty.groupId}</groupId> @@ -251,38 +231,38 @@ <dependency> <groupId>${thirdParty.groupId}</groupId> <!-- TODO: An older version (5.0.9) is available in central. 
--> - <artifactId>${unimi-fastutil.artifactId}</artifactId> - <version>[${unimi-fastutil.version}]</version> - </dependency> + <artifactId>unimi-fastutil</artifactId> + <version>5.1.5</version> + </dependency> <dependency> <groupId>${thirdParty.groupId}</groupId> - <artifactId>${lgpl-utils.artifactId}</artifactId> - <version>[${lgpl-utils.version}]</version> + <artifactId>lgpl-utils</artifactId> + <version>1.0.6</version> </dependency> <dependency> <groupId>${thirdParty.groupId}</groupId> - <artifactId>${ctc-utils.artifactId}</artifactId> - <version>[${ctc-utils.version}]</version> + <artifactId>ctc-utils</artifactId> + <version>5-4-2005</version> </dependency> <dependency> <groupId>${thirdParty.groupId}</groupId> - <artifactId>${dsi-utils.artifactId}</artifactId> - <version>[${dsi-utils.version}]</version> + <artifactId>dsi-utils</artifactId> + <version>1.0.6-020610</version> </dependency> - <dependency> + <dependency> <groupId>${thirdParty.groupId}</groupId> - <artifactId>${high-scale.artifactId}</artifactId> - <version>[${high-scale.version}]</version> + <artifactId>high-scale-lib</artifactId> + <version>1.1.2</version> </dependency> <dependency> - <groupId>${thirdParty.groupId}</groupId> - <artifactId>${iris.artifactId}</artifactId> - <version>[${iris.version}]</version> + <groupId>${thirdParty.groupId}</groupId> + <artifactId>iris</artifactId> + <version>0.58</version> </dependency> <dependency> - <groupId>${thirdParty.groupId}</groupId> - <artifactId>${nxparser.artifactId}</artifactId> - <version>${nxparser.version}</version> + <groupId>${thirdParty.groupId}</groupId> + <artifactId>nxparser</artifactId> + <version>6-22-2010</version> </dependency> <dependency> @@ -295,28 +275,28 @@ <dependency> <groupId>org.apache.river</groupId> <artifactId>tools</artifactId> - <version>[2.1]</version> + <version>2.1</version> </dependency> <dependency> <groupId>org.apache.river</groupId> <artifactId>start</artifactId> - <version>[2.1]</version> + <version>2.1</version> </dependency> <dependency> <groupId>org.apache.river</groupId> <artifactId>tools</artifactId> - <version>[2.1]</version> + <version>2.1</version> </dependency> <dependency> <groupId>org.apache.river</groupId> <artifactId>jsk-lib</artifactId> - <version>[2.1]</version> + <version>2.1</version> </dependency> <dependency> <groupId>org.apache.river</groupId> <artifactId>jsk-platform</artifactId> - <version>[2.1]</version> + <version>2.1</version> </dependency> <dependency> @@ -581,13 +561,7 @@ <groupId>commons-httpclient</groupId> <artifactId>commons-httpclient</artifactId> <version>3.1</version> - </dependency> - - <!--<!– JMX –>--> <!--<dependency> <!– Pretty sure this isn't needed JDK1.5 + –>--> - <!--<groupId>com.sun.jmx</groupId>--> - <!--<artifactId>jmxri</artifactId>--> - <!--<version>1.2.1</version>--> - <!--</dependency>--> + </dependency> </dependencies> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
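The substance of r3475 is Maven's version-range syntax. A bracketed version such as [2.1] is a hard requirement: it pins exactly that release, and a downstream project that needs any other release of the same artifact is left with a conflict that dependency mediation cannot resolve. A bare 2.1 is only a soft default, which nearest-wins mediation or a dependencyManagement section may replace. A hypothetical consumer POM (not part of this commit; the 2.1.1 release number is illustrative) shows the kind of override the softer declarations now allow:

<dependencyManagement>
  <dependencies>
    <!-- Pick a different River release downstream. This only works now that
         the upstream pom declares 2.1 (soft) instead of [2.1] (hard). -->
    <dependency>
      <groupId>org.apache.river</groupId>
      <artifactId>jsk-lib</artifactId>
      <version>2.1.1</version>
    </dependency>
  </dependencies>
</dependencyManagement>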