From: <tho...@us...> - 2014-04-30 19:13:14
Revision: 8159 http://sourceforge.net/p/bigdata/code/8159 Author: thompsonbry Date: 2014-04-30 19:13:09 +0000 (Wed, 30 Apr 2014) Log Message: ----------- {{{ update /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE -r HEAD --force At revision 8158. }}} Note: The last merge into the RDR branch was committed at r8133. The next change to the main branch was at r8137. This merge picks up all changes from r8136 to the head of the main branch. {{{ merge -r 8136:8142 https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0 /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE --- Merging r8136 through r8142 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java C /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/brew U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt --- Merging r8136 through r8142 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/internal U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java Merge complete. ===== File Statistics: ===== Updated: 3 ==== Conflict Statistics: ===== Tree conflicts: 1 }}} @see #526 (RDR) Revision Links: -------------- http://sourceforge.net/p/bigdata/code/8158 http://sourceforge.net/p/bigdata/code/8133 http://sourceforge.net/p/bigdata/code/8137 http://sourceforge.net/p/bigdata/code/8136 http://sourceforge.net/p/bigdata/code/8136 http://sourceforge.net/p/bigdata/code/8142 http://sourceforge.net/p/bigdata/code/8136 http://sourceforge.net/p/bigdata/code/8142 Modified Paths: -------------- branches/RDR/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java Property Changed: ---------------- branches/RDR/ branches/RDR/bigdata/lib/jetty/ branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate/ branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/RDR/bigdata/src/java/com/bigdata/bop/util/ branches/RDR/bigdata/src/java/com/bigdata/htree/raba/ branches/RDR/bigdata/src/java/com/bigdata/jsr166/ branches/RDR/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/RDR/bigdata/src/test/com/bigdata/bop/util/ branches/RDR/bigdata/src/test/com/bigdata/jsr166/ branches/RDR/bigdata/src/test/com/bigdata/util/httpd/ branches/RDR/bigdata-compatibility/ branches/RDR/bigdata-jini/src/java/com/bigdata/attr/ branches/RDR/bigdata-jini/src/java/com/bigdata/disco/ branches/RDR/bigdata-jini/src/java/com/bigdata/util/config/ branches/RDR/bigdata-perf/ branches/RDR/bigdata-perf/btc/ branches/RDR/bigdata-perf/btc/src/resources/ branches/RDR/bigdata-perf/lubm/ branches/RDR/bigdata-perf/uniprot/ branches/RDR/bigdata-perf/uniprot/src/ branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/RDR/bigdata-rdf/src/samples/ branches/RDR/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ 
branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/RDR/dsi-utils/ branches/RDR/dsi-utils/LEGAL/ branches/RDR/dsi-utils/lib/ branches/RDR/dsi-utils/src/ branches/RDR/dsi-utils/src/java/ branches/RDR/dsi-utils/src/java/it/ branches/RDR/dsi-utils/src/java/it/unimi/ branches/RDR/dsi-utils/src/test/ branches/RDR/dsi-utils/src/test/it/unimi/ branches/RDR/dsi-utils/src/test/it/unimi/dsi/ branches/RDR/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/RDR/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/RDR/osgi/ branches/RDR/src/resources/bin/config/ Index: branches/RDR =================================================================== --- branches/RDR 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR ___________________________________________________________________ Modified: svn:mergeinfo ## -1,6 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0:7665-7913 +/branches/BIGDATA_RELEASE_1_3_0:7665-7913,8137-8142 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 \ No newline at end of property Index: branches/RDR/bigdata/lib/jetty =================================================================== --- branches/RDR/bigdata/lib/jetty 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/lib/jetty 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/MGC_1_3_0/bigdata/lib/jetty:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380 
-/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Modified: branches/RDR/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java 2014-04-30 19:13:09 UTC (rev 8159) @@ -1,243 +1,251 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Aug 25, 2010 - */ - -package com.bigdata.bop.bset; - -import java.util.Arrays; -import java.util.Map; -import java.util.concurrent.Callable; -import java.util.concurrent.FutureTask; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.BOpContext; -import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.IConstraint; -import com.bigdata.bop.NV; -import com.bigdata.bop.PipelineOp; -import com.bigdata.bop.engine.BOpStats; -import com.bigdata.relation.accesspath.IBlockingBuffer; - -import cutthecrap.utils.striterators.ICloseableIterator; - -/** - * An operator for conditional routing of binding sets in a pipeline. The - * operator will copy binding sets either to the default sink (if a condition is - * satisfied) and otherwise to the alternate sink (iff one is specified). If a - * solution fails the constraint and the alternate sink is not specified, then - * the solution is dropped. - * <p> - * Conditional routing can be useful where a different data flow is required - * based on the type of an object (for example a term identifier versus an - * inline term in the RDF database) or where there is a need to jump around a - * join group based on some condition. - * <p> - * Conditional routing will cause reordering of solutions when the alternate - * sink is specified as some solutions will flow to the primary sink while - * others flow to the alterate sink. 
- * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id: ConditionalRoutingOp.java 7773 2014-01-11 12:49:05Z thompsonbry - * $ - */ -public class ConditionalRoutingOp extends PipelineOp { - - /** - * - */ - private static final long serialVersionUID = 1L; - - public interface Annotations extends PipelineOp.Annotations { - - /** - * An {@link IConstraint} which specifies the condition. When the - * condition is satisfied the binding set is routed to the default sink. - * When the condition is not satisfied, the binding set is routed to the - * alternative sink. - */ - String CONDITION = ConditionalRoutingOp.class.getName() + ".condition"; - - } - - /** - * Deep copy constructor. - * - * @param op - */ - public ConditionalRoutingOp(final ConditionalRoutingOp op) { - - super(op); - - } - - /** - * Shallow copy constructor. - * - * @param args - * @param annotations - */ - public ConditionalRoutingOp(final BOp[] args, - final Map<String, Object> annotations) { - - super(args, annotations); - - } - - public ConditionalRoutingOp(final BOp[] args, final NV... anns) { - - this(args, NV.asMap(anns)); - - } - - /** - * @see Annotations#CONDITION - */ - public IConstraint getCondition() { - - return (IConstraint) getProperty(Annotations.CONDITION); - - } - - @Override - public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { - - return new FutureTask<Void>(new ConditionalRouteTask(this, context)); - - } - - /** - * Copy the source to the sink or the alternative sink depending on the - * condition. - */ - static private class ConditionalRouteTask implements Callable<Void> { - - private final BOpStats stats; - - private final IConstraint condition; - - private final ICloseableIterator<IBindingSet[]> source; - - private final IBlockingBuffer<IBindingSet[]> sink; - - private final IBlockingBuffer<IBindingSet[]> sink2; - - ConditionalRouteTask(final ConditionalRoutingOp op, - final BOpContext<IBindingSet> context) { - - this.stats = context.getStats(); - - this.condition = op.getCondition(); - - if (condition == null) - throw new IllegalArgumentException(); - - this.source = context.getSource(); - - this.sink = context.getSink(); - - this.sink2 = context.getSink2(); // MAY be null. - -// if (sink2 == null) -// throw new IllegalArgumentException(); - - if (sink == sink2) - throw new IllegalArgumentException(); - - } - - @Override - public Void call() throws Exception { - try { - while (source.hasNext()) { - - final IBindingSet[] chunk = source.next(); - - stats.chunksIn.increment(); - stats.unitsIn.add(chunk.length); - - final IBindingSet[] def = new IBindingSet[chunk.length]; - final IBindingSet[] alt = sink2 == null ? null - : new IBindingSet[chunk.length]; - - int ndef = 0, nalt = 0; - - for (int i = 0; i < chunk.length; i++) { - - final IBindingSet bset = chunk[i].clone(); - - if (condition.accept(bset)) { - - // solution passes condition. default sink. - def[ndef++] = bset; - - } else if (sink2 != null) { - - // solution fails condition. alternative sink. 
- alt[nalt++] = bset; - - } - - } - - if (ndef > 0) { - if (ndef == def.length) - sink.add(def); - else - sink.add(Arrays.copyOf(def, ndef)); -// stats.chunksOut.increment(); -// stats.unitsOut.add(ndef); - } - - if (nalt > 0 && sink2 != null) { - if (nalt == alt.length) - sink2.add(alt); - else - sink2.add(Arrays.copyOf(alt, nalt)); -// stats.chunksOut.increment(); -// stats.unitsOut.add(nalt); - } - - } - - sink.flush(); - if (sink2 != null) - sink2.flush(); - - return null; - - } finally { - source.close(); - sink.close(); - if (sink2 != null) - sink2.close(); - - } - - } // call() - - } // ConditionalRoutingTask. - -} +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Aug 25, 2010 + */ + +package com.bigdata.bop.bset; + +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.FutureTask; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpContext; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstraint; +import com.bigdata.bop.NV; +import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.engine.BOpStats; +import com.bigdata.relation.accesspath.IBlockingBuffer; + +import cutthecrap.utils.striterators.ICloseableIterator; + +/** + * An operator for conditional routing of binding sets in a pipeline. The + * operator will copy binding sets either to the default sink (if a condition is + * satisfied) and otherwise to the alternate sink (iff one is specified). If a + * solution fails the constraint and the alternate sink is not specified, then + * the solution is dropped. + * <p> + * Conditional routing can be useful where a different data flow is required + * based on the type of an object (for example a term identifier versus an + * inline term in the RDF database) or where there is a need to jump around a + * join group based on some condition. + * <p> + * Conditional routing will cause reordering of solutions when the alternate + * sink is specified as some solutions will flow to the primary sink while + * others flow to the alterate sink. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: ConditionalRoutingOp.java 7773 2014-01-11 12:49:05Z thompsonbry + * $ + */ +public class ConditionalRoutingOp extends PipelineOp { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public interface Annotations extends PipelineOp.Annotations { + + /** + * An {@link IConstraint} which specifies the condition. When the + * condition is satisfied the binding set is routed to the default sink. + * When the condition is not satisfied, the binding set is routed to the + * alternative sink. + */ + String CONDITION = ConditionalRoutingOp.class.getName() + ".condition"; + + } + + /** + * Deep copy constructor. 
+ * + * @param op + */ + public ConditionalRoutingOp(final ConditionalRoutingOp op) { + + super(op); + + } + + /** + * Shallow copy constructor. + * + * @param args + * @param annotations + */ + public ConditionalRoutingOp(final BOp[] args, + final Map<String, Object> annotations) { + + super(args, annotations); + + } + + public ConditionalRoutingOp(final BOp[] args, final NV... anns) { + + this(args, NV.asMap(anns)); + + } + + /** + * @see Annotations#CONDITION + */ + public IConstraint getCondition() { + + return (IConstraint) getProperty(Annotations.CONDITION); + + } + + @Override + public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { + + return new FutureTask<Void>(new ConditionalRouteTask(this, context)); + + } + + /** + * Copy the source to the sink or the alternative sink depending on the + * condition. + */ + static private class ConditionalRouteTask implements Callable<Void> { + + private final BOpStats stats; + + private final IConstraint condition; + + private final ICloseableIterator<IBindingSet[]> source; + + private final IBlockingBuffer<IBindingSet[]> sink; + + private final IBlockingBuffer<IBindingSet[]> sink2; + + ConditionalRouteTask(final ConditionalRoutingOp op, + final BOpContext<IBindingSet> context) { + + this.stats = context.getStats(); + + this.condition = op.getCondition(); + + if (condition == null) + throw new IllegalArgumentException(); + + this.source = context.getSource(); + + this.sink = context.getSink(); + + this.sink2 = context.getSink2(); // MAY be null. + +// if (sink2 == null) +// throw new IllegalArgumentException(); + + if (sink == sink2) + throw new IllegalArgumentException(); + + } + + @Override + public Void call() throws Exception { + try { + while (source.hasNext()) { + + final IBindingSet[] chunk = source.next(); + + stats.chunksIn.increment(); + stats.unitsIn.add(chunk.length); + + final IBindingSet[] def = new IBindingSet[chunk.length]; + final IBindingSet[] alt = sink2 == null ? null + : new IBindingSet[chunk.length]; + + int ndef = 0, nalt = 0; + + for (int i = 0; i < chunk.length; i++) { + + if (i % 20 == 0 && Thread.interrupted()) { + + // Eagerly notice if the operator is interrupted. + throw new RuntimeException( + new InterruptedException()); + + } + + final IBindingSet bset = chunk[i].clone(); + + if (condition.accept(bset)) { + + // solution passes condition. default sink. + def[ndef++] = bset; + + } else if (sink2 != null) { + + // solution fails condition. alternative sink. + alt[nalt++] = bset; + + } + + } + + if (ndef > 0) { + if (ndef == def.length) + sink.add(def); + else + sink.add(Arrays.copyOf(def, ndef)); +// stats.chunksOut.increment(); +// stats.unitsOut.add(ndef); + } + + if (nalt > 0 && sink2 != null) { + if (nalt == alt.length) + sink2.add(alt); + else + sink2.add(Arrays.copyOf(alt, nalt)); +// stats.chunksOut.increment(); +// stats.unitsOut.add(nalt); + } + + } + + sink.flush(); + if (sink2 != null) + sink2.flush(); + + return null; + + } finally { + source.close(); + sink.close(); + if (sink2 != null) + sink2.close(); + + } + + } // call() + + } // ConditionalRoutingTask. 
+ +} Index: branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/RDR/bigdata/src/java/com/bigdata/bop/util =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/bop/util 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/java/com/bigdata/bop/util 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/util:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/RDR/bigdata/src/java/com/bigdata/htree/raba =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/htree/raba 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/java/com/bigdata/htree/raba 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata/src/java/com/bigdata/htree/raba ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba:7665-8131,8137-8142 
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/htree/raba:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/RDR/bigdata/src/java/com/bigdata/jsr166 =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/jsr166 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/java/com/bigdata/jsr166 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata/src/java/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/jsr166:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Modified: branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt =================================================================== --- branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt 2014-04-30 19:13:09 UTC (rev 8159) @@ -46,11 +46,12 @@ ------------- The "Boto" python library for the AWS API must be installed in order to instantiate the cluster. 
If not already installed: - % pip install boto + % sudo pip install pycrypto + % sudo pip install boto alternately: - % easy_install boto + % sudo easy_install boto If while running the python scripts the error message appears "ImportError: No module named boto", you will need to set the Index: branches/RDR/bigdata/src/test/com/bigdata/bop/joinGraph =================================================================== --- branches/RDR/bigdata/src/test/com/bigdata/bop/joinGraph 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/test/com/bigdata/bop/joinGraph 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata/src/test/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/bop/joinGraph:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/joinGraph:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4486-4522 /branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/RDR/bigdata/src/test/com/bigdata/bop/util =================================================================== --- branches/RDR/bigdata/src/test/com/bigdata/bop/util 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/test/com/bigdata/bop/util 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata/src/test/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/bop/util:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/util:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/util:4486-4522 /branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/util:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/RDR/bigdata/src/test/com/bigdata/jsr166 =================================================================== --- branches/RDR/bigdata/src/test/com/bigdata/jsr166 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/test/com/bigdata/jsr166 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata/src/test/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/jsr166:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/jsr166:6769-6785 
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/jsr166:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/jsr166:4486-4522 /branches/MGC_1_3_0/bigdata/src/test/com/bigdata/jsr166:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/RDR/bigdata/src/test/com/bigdata/util/httpd =================================================================== --- branches/RDR/bigdata/src/test/com/bigdata/util/httpd 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata/src/test/com/bigdata/util/httpd 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata/src/test/com/bigdata/util/httpd ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/util/httpd:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/util/httpd:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/util/httpd:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4486-4522 /branches/MGC_1_3_0/bigdata/src/test/com/bigdata/util/httpd:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/RDR/bigdata-compatibility =================================================================== --- branches/RDR/bigdata-compatibility 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-compatibility 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-compatibility ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-compatibility:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-compatibility:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-compatibility:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata-compatibility:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-compatibility:4175-4387 /branches/MGC_1_3_0/bigdata-compatibility:7609-7752 \ No newline at end of property Index: branches/RDR/bigdata-jini/src/java/com/bigdata/attr =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/attr 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/attr 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-jini/src/java/com/bigdata/attr ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/attr:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/attr:6769-6785 
/branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/attr:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr:7665-8131,8137-8142 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/attr:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4486-4522 \ No newline at end of property Index: branches/RDR/bigdata-jini/src/java/com/bigdata/disco =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/disco 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/disco 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-jini/src/java/com/bigdata/disco ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/disco:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/disco:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/disco:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco:7665-8131,8137-8142 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/disco:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4486-4522 \ No newline at end of property Index: branches/RDR/bigdata-jini/src/java/com/bigdata/util/config =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/util/config 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/util/config 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-jini/src/java/com/bigdata/util/config ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/util/config:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/util/config:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/util/config:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/util/config:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/util/config:7665-8131,8137-8142 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/util/config:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:4486-4522 \ No newline at end of property Index: branches/RDR/bigdata-perf =================================================================== --- branches/RDR/bigdata-perf 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-perf 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-perf ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-perf:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf:6769-6785 
/branches/BIGDATA_RELEASE_1_2_0/bigdata-perf:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf:7665-8131,8137-8142 /branches/BTREE_BUFFER_BRANCH/bigdata-perf:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-perf:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-perf:4486-4522 \ No newline at end of property Index: branches/RDR/bigdata-perf/btc =================================================================== --- branches/RDR/bigdata-perf/btc 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-perf/btc 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-perf/btc ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/btc:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf/btc:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/btc:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata-perf/btc:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/btc:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/btc:4175-4387 \ No newline at end of property Index: branches/RDR/bigdata-perf/btc/src/resources =================================================================== --- branches/RDR/bigdata-perf/btc/src/resources 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-perf/btc/src/resources 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-perf/btc/src/resources ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/btc/src/resources:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf/btc/src/resources:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/btc/src/resources:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc/src/resources:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc/src/resources:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata-perf/btc/src/resources:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/btc/src/resources:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/btc/src/resources:4175-4387 \ No newline at end of property Index: branches/RDR/bigdata-perf/lubm =================================================================== --- branches/RDR/bigdata-perf/lubm 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-perf/lubm 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-perf/lubm ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/lubm:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf/lubm:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/lubm:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/lubm:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/lubm:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata-perf/lubm:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/lubm:4175-4387 /branches/MGC_1_3_0/bigdata-perf/lubm:7609-7752 \ No newline at end of property Index: branches/RDR/bigdata-perf/uniprot =================================================================== --- branches/RDR/bigdata-perf/uniprot 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-perf/uniprot 2014-04-30 19:13:09 UTC (rev 8159) Property changes 
on: branches/RDR/bigdata-perf/uniprot ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/uniprot:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf/uniprot:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/uniprot:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata-perf/uniprot:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/uniprot:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/uniprot:4175-4387 \ No newline at end of property Index: branches/RDR/bigdata-perf/uniprot/src =================================================================== --- branches/RDR/bigdata-perf/uniprot/src 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-perf/uniprot/src 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-perf/uniprot/src ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/uniprot/src:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf/uniprot/src:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/uniprot/src:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot/src:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot/src:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata-perf/uniprot/src:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/uniprot/src:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/uniprot/src:4175-4387 \ No newline at end of property Index: branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:4486-4522 /branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/changesets =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/changesets 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/changesets 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/changesets 
___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/changesets:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/src/java/com/bigdata/rdf/changesets:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-rdf/src/java/com/bigdata/rdf/changesets:4175-4387 /branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets:7609-7752 \ No newline at end of property Index: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/error =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/error 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/error 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/error ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/error:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/src/java/com/bigdata/rdf/error:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/error:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/error:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/error:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/error:4486-4522 /branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/error:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/error:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal 2014-04-30 19:13:09 UTC (rev 8159) Property changes on: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal ___________________________________________________________________ Modified: svn:mergeinfo ## -1,7 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/internal:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/src/java/com/bigdata/rdf/internal:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/internal:6766-7380 -/branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal:7665-8131 +/branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal:7665-8131,8137-8142 /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-rdf/src/java/com/bigdata/rdf/internal:4175-4387 /branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal:7609-7752 \ No newline at end of property Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java 
=================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java 2014-04-30 18:52:56 UTC (rev 8158) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java 2014-04-30 19:13:09 UTC (rev 8159) @@ -44,17 +44,17 @@ * SPARQL REGEX operator. */ public class RegexBOp extends XSDBooleanIVValueExpression - implements INeedsMaterialization { + implements INeedsMaterialization { /** - * - */ - private static final long serialVersionUID = 1357420268214930143L; - - private static final transient Logger log = Logger.getLogger(RegexBOp.class); + * + */ + private static final long serialVersionUID = 1357420268214930143L; + + private static final transient Logger log = Logger.getLogger(RegexBOp.class); public interface Annotations extends XSDBooleanIVValueExpression.Annotations { - + /** * The cached regex pattern. */ @@ -64,65 +64,65 @@ } private static Map<String,Object> anns( - final IValueExpression<? extends IV> pattern, - final IValueExpression<? extends IV> flags) { - - try { - - if (pattern instanceof IConstant && - (flags == null || flags instanceof IConstant)) { - - final IV parg = ((IConstant<IV>) pattern).get(); - - final IV farg = flags != null ? - ((IConstant<IV>) flags).get() : null; - - if (parg.hasValue() && (farg == null || farg.hasValue())) { - - final Value pargVal = parg.getValue(); - - final Value fargVal = farg != null ? farg.getValue() : null; - - return NV.asMap( - new NV(Annotations.PATTERN, - getPattern(pargVal, fargVal))); - - } - - } - - } catch (Exception ex) { - - if (log.isInfoEnabled()) { - log.info("could not create pattern for: " + pattern + ", " + flags); - } - - } - - return BOp.NOANNS; - + final IValueExpression<? extends IV> pattern, + final IValueExpression<? extends IV> flags) { + + try { + + if (pattern instanceof IConstant && + (flags == null || flags instanceof IConstant)) { + + final IV parg = ((IConstant<IV>) pattern).get(); + + final IV farg = flags != null ? + ((IConstant<IV>) flags).get() : null; + + if (parg.hasValue() && (farg == null || farg.hasValue())) { + + final Value pargVal = parg.getValue(); + + final Value fargVal = farg != null ? farg.getValue() : null; + + return NV.asMap( + new NV(Annotations.PATTERN, + getPattern(pargVal, fargVal))); + + } + + } + + } catch (Exception ex) { + + if (log.isInfoEnabled()) { + log.info("could not create pattern for: " + pattern + ", " + flags); + } + + } + + return BOp.NOANNS; + } - /** - * Construct a regex bop without flags. - */ + /** + * Construct a regex bop without flags. + */ @SuppressWarnings("rawtypes") - public RegexBOp( - final IValueExpression<? extends IV> var, - final IValueExpression<? extends IV> pattern) { + public RegexBOp( + final IValueExpression<? extends IV> var, + final IValueExpression<? extends IV> pattern) { this(new BOp[] { var, pattern }, anns(pattern, null)); } - /** - * Construct a regex bop with flags. - */ - @SuppressWarnings("rawtypes") + /** + * Construct a regex bop with flags. + */ + @SuppressWarnings("rawtypes") public RegexBOp( - final IValueExpression<? extends IV> var, - final IValueExpression<? extends IV> pattern, - final IValueExpression<? extends IV> flags) { + final IValueExpression<? extends IV> var, + final IValueExpression<? extends IV> pattern, + final IValueExpression<? 
extends IV> flags) { this(new BOp[] { var, pattern, flags }, anns(pattern, flags)); @@ -133,8 +133,8 @@ */ public RegexBOp(final BOp[] args, final Map<String, Object> anns) { - super(args, anns); - + super(args, anns); + if (args.length < 2 || args[0] == null || args[1] == null) throw new IllegalArgumentException(); @@ -146,33 +146,34 @@ public RegexBOp(final RegexBOp op) { super(op); } - + + @Override public Requirement getRequirement() { - - return INeedsMaterialization.Requirement.SOMETIMES; - + + return INeedsMaterialization.Requirement.SOMETIMES; + } - + + @Override public boolean accept(final IBindingSet bs) { - - @SuppressWarnings("rawtypes") + final Value var = asValue(getAndCheckBound(0, bs)); - + @SuppressWarnings("rawtypes") final IV pattern = getAndCheckBound(1, bs); @SuppressWarnings("rawtypes") final IV flags = arity() > 2 ? get(2).get(bs) : null; - + if (log.isDebugEnabled()) { - log.debug("regex var: " + var); - log.debug("regex pattern: " + pattern); - log.debug("regex flags: " + flags); + log.debug("regex var: " + var); + log.debug("regex pattern: " + pattern); + log.debug("regex flags: " + flags); } - - return accept(var, pattern.getValue(), - flags != null ? flags.getValue() : null); + return accept(var, pattern.getValue(), flags != null ? flags.getValue() + : null); + } /** @@ -185,67 +186,87 @@ * REGEXBOp should cache the Pattern when it is a constant </a> */ private boolean accept(final Value arg, final Value parg, final Value farg) { - + if (log.isDebugEnabled()) { - log.debug("regex var: " + arg); - log.debug("regex pattern: " + parg); - log.debug("regex flags: " + farg); + log.debug("regex var: " + arg); + log.debug("regex pattern: " + parg); + log.debug("regex flags: " + farg); } - + if (QueryEvaluationUtil.isSimpleLiteral(arg)) { - + final String text = ((Literal) arg).getLabel(); - + try { - - // first check for cached pattern - Pattern pattern = (Pattern) getProperty(Annotations.PATTERN); - if (pattern == null) { - pattern = getPattern(parg, farg); - } + + // first check for cached pattern + Pattern pattern = (Pattern) getProperty(Annotations.PATTERN); + + if (pattern == null) { + + // resolve the pattern. NB: NOT cached. + pattern = getPattern(parg, farg); + + } + + if (Thread.interrupted()) { + + /* + * Eagerly notice if the operator is interrupted. + * + * Note: Regex can be a high latency operation for a large + * RDF Literal. Therefore we want to check for an interrupt + * before each regex test. The Pattern code itself will not + * notice an interrupt.... 
+ */ + throw new RuntimeException(new InterruptedException()); + + } + final boolean result = pattern.matcher(text).find(); + return result; - + } catch (IllegalArgumentException ex) { - - throw new SparqlTypeErrorException(); - + + throw new SparqlTypeErrorException(); + } - + } else { - - throw new SparqlTypeErrorException(); - + + throw new SparqlTypeErrorException(); + } - + } - private static Pattern getPattern(final Value parg, final Value farg) - throws IllegalArgumentException { - + private static Pattern getPattern(final Value parg, final Value farg) + throws IllegalArgumentException { + if (log.isDebugEnabled()) { - log.debug("regex pattern: " + parg); - log.debug("regex flags: " + farg); + log.debug("regex pattern: " + parg); + log.debug("regex flags: " + farg); } if (QueryEvaluationUtil.isSimpleLiteral(parg) && (farg == null || QueryEvaluationUtil.isSimpleLiteral(farg))) { final String ptn = ((Literal) parg).getLabel(); - String flags = ""; - if (farg != null) { - flags = ((Literal)farg).getLabel(); - } - int f = 0; - for (char c : flags.toCharArray()) { - switch (c) { - case 's': - f |= Pattern.DOTALL; - break; - case 'm': - f |= Pattern.MULTILINE; - break; - case 'i': { + String flags = ""; + if (farg != null) { + flags = ((Literal)farg).getLabel(); + } + int f = 0; + for (char c : flags.toCharArray()) { + switch (c) { + case 's': + f |= Pattern.DOTALL; + break; + case 'm': + f |= Pattern.MULTILINE; + break; + case 'i': { /* ... [truncated message content] |
From: <tho...@us...> - 2014-04-30 18:53:01
Revision: 8158 http://sourceforge.net/p/bigdata/code/8158 Author: thompsonbry Date: 2014-04-30 18:52:56 +0000 (Wed, 30 Apr 2014) Log Message: ----------- adding the indexLBS.html file to SVN. This will be removed soon.... It is a variant on the old index.html that targets /bigdata/LBS/(leader|read). Added Paths: ----------- branches/RDR/bigdata-war/src/html/indexLBS.html Added: branches/RDR/bigdata-war/src/html/indexLBS.html =================================================================== --- branches/RDR/bigdata-war/src/html/indexLBS.html (rev 0) +++ branches/RDR/bigdata-war/src/html/indexLBS.html 2014-04-30 18:52:56 UTC (rev 8158) @@ -0,0 +1,144 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" +"http://www.w3.org/TR/html4/loose.dtd"> +<html><!-- This is a version of the old index.html that targets the HA load balancer. This file will be removed as part of the HA LBS ticket. --> +<head profile="http://www.w3.org/2005/10/profile"> +<link rel="icon" + type="image/png" + href="/bigdata/html/favicon.ico" /> +<meta http-equiv="Content-Type" content="text/html;charset=utf-8" > +<title>bigdata® NanoSparqlServer</title> +<!-- $Id: index.html 7921 2014-03-10 21:15:00Z thompsonbry $ --> +<!-- junit test marker: index.html --> +</head> +<body> + +<h2>Welcome to bigdata®.</h2> +<p>Please consult the +<a href="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer" + target="_blank" + > documentation</a> for information on using the NanoSparqlServer's REST Api. </br>See the + <a href="http://wiki.bigdata.com/wiki/index.php/Main_Page" + target="_blank" + >wiki</a> for help on query optimization, bigdata SPARQL extensions, etc. +</p> + +<p> +The following URLs should be active when deployed in the default configuration: +</p> +<dl> +<dt>http://hostname:port/bigdata</dt> +<dd>This page.</dd> +<dt>http://hostname:port/bigdata/LBS/sparql</dt> +<dd>The SPARQL REST API (<a href="/bigdata/LBS/sparql">Service Description + VoID Description</a>).</dd> +<dt>http://hostname:port/bigdata/namespace</dt> +<dd>VoID <a href="/bigdata/LBS/namespace">graph of available KBs</a> from this service.</dd> +<dt>http://hostname:port/bigdata/status</dt> +<dd>A <a href="/bigdata/status">status</a> page.</dd> +<dt>http://hostname:port/bigdata/counters</dt> +<dd>A <a href="/bigdata/counters"> performance counters</a> page.</dd> +</dl> + +<p> +Where <i>hostname</i> is the name of this host and <i>port</i> is the port at +which this page was accessed. +</p> + +<!-- Note: Some applications (firefox 7) can not handle a GET with a very long + URL. For that reason ONLY this operation defaults to a POST. You SHOULD + use GET for database queries since they are, by and large, idempotent. + --> +<h2><a href="http://www.w3.org/TR/sparql11-query/" + title="W3C SPARQL 1.1 Query Recommendation" + target="_blank" + > SPARQL Query </a></h2> +<FORM action="/bigdata/LBS/read/sparql" method="GET" name="QUERY"> + <P> + <TEXTAREA name="query" rows="10" cols="80" title="Enter SPARQL Query." + >SELECT * { ?s ?p ?o } LIMIT 1</TEXTAREA> + </P><P> + Tenant Namespace + <INPUT type="text" name="namespace" title="Tenant namespace." + > (leave empty for default KB) + </P><P> + <INPUT type="submit" value="Send" title="Submit query."> + <INPUT type="checkbox" name="explain" value="true" + title="Explain query plan rather than returning the query results." + > Explain + (<INPUT type="checkbox" name="explain" value="details" + title="Explain query plan rather than returning the query results (with extra details)." 
+ > Details) + <INPUT type="checkbox" name="analytic" value="true" + title="Enable the analytic query package." + > Analytic +<!-- TODO Uncomment to reveal the RTO option. --> + <INPUT type="checkbox" name="RTO" value="true" + title="Enable the Runtime Query Optimizer (RTO) - This is an alpha feature." + > RTO (Alpha) +<!-- --> + <INPUT type="checkbox" name="xhtml" value="true" + title="Request XHTML response (results formatted as table)." + checked="checked" + > XHTML + </P> +</FORM> +<h2><a href="http://www.w3.org/TR/sparql11-update/" + title="W3C SPARQL Update Recommendation" + target="_blank" + >SPARQL Update</a></h2> +<FORM action="/bigdata/LBS/leader/sparql" method="post"> + <P> + <TEXTAREA name="update" rows="10" cols="80" title="Enter SPARQL Update." + > +PREFIX dc: <http://purl.org/dc/elements/1.1/> +INSERT DATA +{ + <http://example/book1> dc:title "A new book" ; + dc:creator "A.N.Other" . +}</TEXTAREA> + </P><P> + Tenant Namespace + <INPUT type="text" name="namespace" title="Tenant namespace." + > (leave empty for default KB) + </P><P> + <INPUT type="submit" value="Send" title="Submit Update."> + <!--INPUT type="checkbox" name="explain" value="true" + title="Explain query plan rather than returning the query results." + > Explain--> + <INPUT type="checkbox" name="analytic" value="true" + title="Enable the analytic query package." + > Analytic + <INPUT type="checkbox" name="monitor" value="true" + title="Monitor the execution of the UPDATE request." + checked="checked" + > Monitor + </P> +</FORM> +<p> +<!-- Note: Some common characters need to be escaped here and also in the SPARQL + examples above. + --> +Here are some useful namespaces: +</p> +<pre> +prefix dc: <http://purl.org/dc/elements/1.1/> +prefix xsd: <http://www.w3.org/2001/XMLSchema#> +prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> +prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> +prefix owl: <http://www.w3.org/2002/07/owl#> +prefix foaf: <http://xmlns.com/foaf/0.1/> +prefix hint: <http://www.bigdata.com/queryHints#> +prefix bd: <http://www.bigdata.com/rdf#> +prefix bds: <http://www.bigdata.com/rdf/search#> +</pre> +<!-- Note: Use SPARQL Update "LOAD" instead. +<h2>Upload Data (URL):</h2> +<form action="sparql" method="post"> + <p> + <textarea name="uri" rows="1" cols="100">file:/</textarea> + </p><p> + <input type="submit" value="Upload"> + </p> +</form> +--> +</body> +</html> \ No newline at end of file Property changes on: branches/RDR/bigdata-war/src/html/indexLBS.html ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
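The form actions in indexLBS.html above illustrate the split between the two load-balanced endpoints: query forms submit to /bigdata/LBS/read/sparql while update forms submit to /bigdata/LBS/leader/sparql. The following is a minimal Java sketch of the same split using only the JDK HTTP client; the base URL (localhost:8080) is an assumption and should be adjusted to the local deployment.
{{{
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;

public class LbsEndpointSketch {

    public static void main(final String[] args) throws Exception {

        // Assumed base URL of the NanoSparqlServer web application.
        final String base = "http://localhost:8080/bigdata";

        // Query: GET against /LBS/read/sparql (may be served by any joined service).
        final String query = URLEncoder.encode("SELECT * { ?s ?p ?o } LIMIT 1", "UTF-8");
        final HttpURLConnection get = (HttpURLConnection) new URL(
                base + "/LBS/read/sparql?query=" + query).openConnection();
        get.setRequestProperty("Accept", "application/sparql-results+xml");
        System.out.println("query status: " + get.getResponseCode());

        // Update: POST against /LBS/leader/sparql (proxied to the quorum leader).
        final String update = "PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"
                + "INSERT DATA { <http://example/book1> dc:title \"A new book\" . }";
        final HttpURLConnection post = (HttpURLConnection) new URL(
                base + "/LBS/leader/sparql").openConnection();
        post.setRequestMethod("POST");
        post.setDoOutput(true);
        post.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
        final OutputStream os = post.getOutputStream();
        try {
            os.write(("update=" + URLEncoder.encode(update, "UTF-8")).getBytes("UTF-8"));
        } finally {
            os.close();
        }
        System.out.println("update status: " + post.getResponseCode());

    }

}
}}}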
From: <tho...@us...> - 2014-04-30 18:49:25
Revision: 8157 http://sourceforge.net/p/bigdata/code/8157 Author: thompsonbry Date: 2014-04-30 18:49:20 +0000 (Wed, 30 Apr 2014) Log Message: ----------- Refactor of the HA load balancer. At this point the NOP, round-robin, and ganglia-based policies work against their test suites. The client must indicate whether a request will be proxied to the quorum leader (/bigdata/LBS/leader/...) or whether it will be load balanced across the joined services (/bigdata/LBS/read/...). @see #624 (HA LBS). Modified Paths: -------------- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java branches/RDR/bigdata-war/src/WEB-INF/web.xml Added Paths: ----------- branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3LoadBalancerTestCase.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll_LBS.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_GangliaLBS.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_NOP.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_RoundRobin.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractLBSPolicy.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/HostScore.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/ServiceScore.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/NOPLBSPolicy.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/RoundRobinLBSPolicy.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/ 
branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/DefaultHostScoringRule.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaLBSPolicy.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/HostTable.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/IHostScoringRule.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/LoadOneHostScoringRule.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/NOPHostScoringRule.java Modified: branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java 2014-04-30 18:49:20 UTC (rev 8157) @@ -202,7 +202,7 @@ * * @see ConfigurationOptions */ - private ServiceID serviceID; + private final AtomicReference<ServiceID> serviceIDRef = new AtomicReference<ServiceID>(); /** * The directory for the service. This is the directory within which the @@ -377,7 +377,7 @@ */ public ServiceID getServiceID() { - return serviceID; + return serviceIDRef.get(); } @@ -674,15 +674,12 @@ if (serviceIdFile.exists()) { + final ServiceID tmp; try { - // Read from file, set on class. - this.serviceID = readServiceId(serviceIdFile); + // Read from file. + tmp = readServiceId(serviceIdFile); - if (log.isInfoEnabled()) - log.info("Existing service instance: serviceID=" - + serviceID); - } catch (IOException ex) { fatal("Could not read serviceID from existing file: " @@ -691,6 +688,12 @@ } + if (log.isInfoEnabled()) + log.info("Existing service instance: serviceID=" + tmp); + + // Set the ServiceID that we read from the file. + setServiceID(tmp); + } else { if (log.isInfoEnabled()) @@ -725,9 +728,10 @@ * want to use that ServiceID and not whatever was in the * Configuration. */ - UUID serviceUUID = this.serviceID == null ? null : JiniUtil - .serviceID2UUID(this.serviceID); - + final ServiceID serviceID = getServiceID(); + UUID serviceUUID = serviceID == null ? null : JiniUtil + .serviceID2UUID(serviceID); + for (Entry e : entries) { if (e instanceof Name && serviceName == null) { @@ -796,13 +800,16 @@ // if serviceUUID assigned then set ServiceID from it now. if (serviceUUID != null) { - if (this.serviceID != null) { + // Convert ServiceID read from Configuration. + final ServiceID tmp = JiniUtil.uuid2ServiceID(serviceUUID); - // Convert ServiceID read from Configuration. - final ServiceID tmp = JiniUtil - .uuid2ServiceID(serviceUUID); + // Already assigned ServiceID (may be null). + final ServiceID existingServiceID = getServiceID(); + + if (existingServiceID != null) { - if (!this.serviceID.equals(tmp)) { + // Already set. + if (!existingServiceID.equals(tmp)) { /* * This is a paranoia check on the Configuration and @@ -816,19 +823,21 @@ + serviceIdFile + " : Configuration=" + tmp + ", but expected =" - + this.serviceID); + + existingServiceID); } + } else { + + // Set serviceID. + setServiceID(JiniUtil.uuid2ServiceID(serviceUUID)); + } - // Set serviceID. - this.serviceID = JiniUtil.uuid2ServiceID(serviceUUID); - if (!serviceIdFile.exists()) { // write the file iff it does not exist. 
- writeServiceIDOnFile(this.serviceID); + writeServiceIDOnFile(tmp); } else { /* @@ -836,16 +845,16 @@ * assigned ServiceID. */ try { - final ServiceID tmp = readServiceId(serviceIdFile); - if (!this.serviceID.equals(tmp)) { + final ServiceID tmp2 = readServiceId(serviceIdFile); + if (!tmp.equals(tmp2)) { /* * The assigned ServiceID and ServiceID written * on the file do not agree. */ throw new RuntimeException( - "Entry has ServiceID=" + this.serviceID + "Entry has ServiceID=" + tmp + ", but file as ServiceID=" - + tmp); + + tmp2); } } catch (IOException e1) { throw new RuntimeException(e1); @@ -860,11 +869,15 @@ * ourselves. */ - // set serviceID. - this.serviceID = JiniUtil.uuid2ServiceID(UUID.randomUUID()); + // Generate a random ServiceID. + final ServiceID tmp = JiniUtil.uuid2ServiceID(UUID + .randomUUID()); - // write the file iff it does not exist. - writeServiceIDOnFile(this.serviceID); + // Set our ServiceID. + setServiceID(tmp); + + // Write the file iff it does not exist. + writeServiceIDOnFile(tmp); } @@ -908,7 +921,7 @@ public String toString() { // note: MAY be null. - final ServiceID serviceID = this.serviceID; + final ServiceID serviceID = this.serviceIDRef.get(); return getClass().getName() + "{serviceName=" @@ -1153,7 +1166,9 @@ // The as-configured Entry[] attributes. final Entry[] attributes = entries.toArray(new Entry[0]); - if (this.serviceID != null) { + final ServiceID serviceID = getServiceID(); + + if (serviceID != null) { /* * We read the serviceID from local storage (either the @@ -1188,28 +1203,30 @@ throw new AssertionError();// keeps compiler happy. } - /* - * Note: This is synchronized in case set via listener by the - * JoinManager, which would be rather fast action on its part. - */ - synchronized (this) { +// /* +// * Note: This is synchronized in case set via listener by the +// * JoinManager, which would be rather fast action on its part. +// */ +// synchronized (this) { +// +// final ServiceID serviceID = getServiceID(); +// +// if (serviceID != null) { +// +// /* +// * Notify the service that it's service UUID has been set. +// * +// * @todo Several things currently depend on this notification. +// * In effect, it is being used as a proxy for the service +// * registration event. +// */ +// +// notifyServiceUUID(serviceID); +// +// } +// +// } - if (this.serviceID != null) { - - /* - * Notify the service that it's service UUID has been set. - * - * @todo Several things currently depend on this notification. - * In effect, it is being used as a proxy for the service - * registration event. - */ - - notifyServiceUUID(serviceID); - - } - - } - } /** @@ -1313,6 +1330,25 @@ } + private void setServiceID(final ServiceID newValue) { + + if (newValue == null) + throw new IllegalArgumentException(); + + if (!serviceIDRef.compareAndSet(null/* expect */, newValue)) { + + throw new IllegalStateException( + "ServiceID may not be changed: ServiceID=" + + serviceIDRef.get() + ", proposed=" + newValue); + + } + + if (log.isInfoEnabled()) + log.info("serviceID=" + newValue + ", serviceUUID=" + + JiniUtil.serviceID2UUID(newValue)); + + } + /** * This method is responsible for saving the {@link ServiceID} on stable * storage when it is invoked. 
It will be invoked iff the {@link ServiceID} @@ -1324,28 +1360,13 @@ @Override synchronized public void serviceIDNotify(final ServiceID serviceID) { - if (serviceID == null) - throw new IllegalArgumentException(); - - if (log.isInfoEnabled()) - log.info("serviceID=" + serviceID + ", serviceUUID=" - + JiniUtil.serviceID2UUID(serviceID)); + setServiceID(serviceID); - if (this.serviceID != null && !this.serviceID.equals(serviceID)) { - - throw new IllegalStateException( - "ServiceID may not be changed: ServiceID=" + this.serviceID - + ", proposed=" + serviceID); - - } - - this.serviceID = serviceID; - assert serviceIdFile != null : "serviceIdFile not defined?"; writeServiceIDOnFile(serviceID); - notifyServiceUUID(serviceID); +// notifyServiceUUID(serviceID); /* * Update the Entry[] for the service registrars to reflect the assigned @@ -1410,35 +1431,14 @@ } - /** - * Notify the {@link AbstractService} that it's service UUID has been set. - */ - synchronized protected void notifyServiceUUID(final ServiceID serviceID) { - - if (serviceID == null) - throw new IllegalArgumentException(); - - if (this.serviceID != null && !this.serviceID.equals(serviceID)) { - - throw new IllegalStateException( - "ServiceID may not be changed: ServiceID=" + this.serviceID - + ", proposed=" + serviceID); - - } - -// if(impl != null && impl instanceof AbstractService) { +// /** +// * Notify the {@link AbstractService} that it's service UUID has been set. +// */ +// final protected void notifyServiceUUID(final ServiceID newValue) { // -// final UUID serviceUUID = JiniUtil.serviceID2UUID(serviceID); -// -// final AbstractService service = ((AbstractService) impl); -// -// service.setServiceUUID(serviceUUID); -// -// } - - this.serviceID = serviceID; - - } +// setServiceID(newValue); +// +// } /** * Logs a message. If the service is no longer registered with any Modified: branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-04-30 18:49:20 UTC (rev 8157) @@ -272,8 +272,7 @@ new NV(com.bigdata.journal.PlatformStatsPlugIn.Options.COLLECT_PLATFORM_STATISTICS,"true"), new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), - new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), - new NV(com.bigdata.journal.GangliaPlugIn.Options.REPORT_DELAY,"2000"), // NB: short delay is used to develop the HALBS. +// new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), }, bigdata.kb); Modified: branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-04-30 18:49:20 UTC (rev 8157) @@ -274,8 +274,7 @@ new NV(com.bigdata.journal.PlatformStatsPlugIn.Options.COLLECT_PLATFORM_STATISTICS,"true"), new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), - new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), - new NV(com.bigdata.journal.GangliaPlugIn.Options.REPORT_DELAY,"2000"), // NB: short delay is used to develop the HALBS. 
+// new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), }, bigdata.kb); Modified: branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-04-30 18:49:20 UTC (rev 8157) @@ -269,9 +269,9 @@ new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), + new NV(com.bigdata.journal.PlatformStatsPlugIn.Options.COLLECT_PLATFORM_STATISTICS,"true"), new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), - new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), - new NV(com.bigdata.journal.GangliaPlugIn.Options.REPORT_DELAY,"2000"), // NB: short delay is used to develop the HALBS. +// new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), }, bigdata.kb); Modified: branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-30 18:49:20 UTC (rev 8157) @@ -103,6 +103,8 @@ import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rdf.sail.CreateKBTask; import com.bigdata.rdf.sail.webapp.ConfigParams; +import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet; +import com.bigdata.rdf.sail.webapp.IHALoadBalancerPolicy; import com.bigdata.rdf.sail.webapp.NanoSparqlServer; import com.bigdata.rwstore.RWStore; import com.bigdata.service.AbstractHATransactionService; @@ -4665,6 +4667,47 @@ } /** + * Change the {@link IHALoadBalancerPolicy}. + * <p> + * TODO There are some intrinsic problems with this method that should be + * resolved before exposing it as an administrative API on the + * {@link HAGlue} interface. + * <p> + * (1) This only applies to running instances of the + * {@link HALoadBalancerServlet}. If an instance is started after this + * method is called, it will run with the as-configured + * {@link IHALoadBalancerPolicy} instance of the one specified in the last + * invocation of this method. + * <p> + * (2) There are various race conditions that exist with respect to: (a) the + * atomic change over of the {@link IHALoadBalancerPolicy} during an + * in-flight request; and (b) the atomic destroy of the old policy once + * there are no more in-flight requests using that old policy. + * + * TODO Either the {@link IHALoadBalancerPolicy} needs to be serializable or + * we need to pass along the class name and the configuration parameters. + * For this case, the configuration should be set from the caller specified + * values rather than those potentially associated with <code>web.xml</code> + * , especially since <code>web.xml</code> might not even have the necessary + * configuration parameters defined for the caller specified policy. 
+ */ + public void setHALoadBalancerPolicy(final IHALoadBalancerPolicy policy) { + + final Server server = this.jettyServer; + + if (server == null) + throw new IllegalStateException(); + + final WebAppContext wac = NanoSparqlServer.getWebApp(server); + + if (log.isInfoEnabled()) + log.info("Will set LBS: wac=" + wac + ", policy: " + policy); + + HALoadBalancerServlet.setPolicy(wac.getServletContext(), policy); + + } + + /** * Conditionally create the default KB instance as identified in * <code>web.xml</code>. * Modified: branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-30 18:49:20 UTC (rev 8157) @@ -96,6 +96,7 @@ import com.bigdata.quorum.zk.ZKQuorumClient; import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rdf.sail.webapp.client.HttpException; +import com.bigdata.rdf.sail.webapp.client.RemoteRepository; import com.bigdata.service.jini.JiniClientConfig; import com.bigdata.service.jini.RemoteDestroyAdmin; import com.bigdata.util.InnerCause; @@ -1403,7 +1404,7 @@ return 3; } - + /** * Return Zookeeper quorum that can be used to reflect (or act on) the * distributed quorum state for the logical service. @@ -2791,6 +2792,26 @@ */ protected void simpleTransaction_noQuorumCheck(final HAGlue leader) throws IOException, Exception { + + simpleTransaction_noQuorumCheck(leader, false/* useLoadBalancer */); + + } + + /** + * Immediately issues a simple transaction against the service. + * + * @param haGlue + * The service (must be the leader to succeed unless using the + * load balancer). + * @param useLoadBalancer + * When <code>true</code> the LBS will be used and the update + * request may be directed to any service and will be proxied to + * the leader if necessary. + * @throws IOException + * @throws Exception + */ + protected void simpleTransaction_noQuorumCheck(final HAGlue haGlue, + final boolean useLoadBalancer) throws IOException, Exception { final StringBuilder sb = new StringBuilder(); sb.append("DROP ALL;\n"); @@ -2802,9 +2823,12 @@ final String updateStr = sb.toString(); - getRemoteRepository(leader).prepareUpdate(updateStr).evaluate(); - - } + final RemoteRepository repo = getRemoteRepository(haGlue, + useLoadBalancer); + + repo.prepareUpdate(updateStr).evaluate(); + + } /** * Verify that an attempt to read on the specified service is disallowed. Added: branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3LoadBalancerTestCase.java =================================================================== --- branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3LoadBalancerTestCase.java (rev 0) +++ branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3LoadBalancerTestCase.java 2014-04-30 18:49:20 UTC (rev 8157) @@ -0,0 +1,510 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.journal.jini.ha; + +import java.io.IOException; +import java.io.Serializable; + +import javax.servlet.ServletConfig; +import javax.servlet.ServletContext; + +import net.jini.config.Configuration; + +import com.bigdata.ha.HAGlue; +import com.bigdata.ha.HAStatusEnum; +import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.jini.ha.HAJournalServer.HAQuorumService; +import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; +import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet; +import com.bigdata.rdf.sail.webapp.IHALoadBalancerPolicy; +import com.bigdata.rdf.sail.webapp.client.RemoteRepository; +import com.bigdata.rdf.sail.webapp.client.RemoteRepository.RemoveOp; + +/** + * Test suite for the HA load balancer. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/624"> HA Load Balancer </a> + * + * FIXME Test query using GET and POST to ensure that we have coverage in + * the load balancer servlet for proper forwarding (to the local service) + * and proper proxying (to a remote service) for both GET and POST. Also + * test PUT and DELETE methods. Tests need to be written to the leader (for + * forward to the local machine) and the follower (for proxying to the + * leader). + * <p> + * Test coverage should also extend to a service that is running but not + * joined with the met quorum. E.g., in an error state. Right now that will + * not work because the error state terminates the zk client connection and + * the HA load balancer is reusing the same zk client as the embedded + * HAJournalServer. + * + * FIXME Make sure that each concrete implementation of this class sets the + * specific load balancer policy to be tested *and* verifies that the + * specific load balancer policy is in effect. + * + * FIXME Write tests of handling when a service is up but not joined with + * the met quorum and when a service is not running. There are going to be + * some cases where the {@link HALoadBalancerServlet} proxies a request + * where the target service goes asynchronously and is unable to respond. + * What kinds of exceptions does this generate and how can we handle those + * exceptions? + * + * FIXME Write tests of handling when the target service of the original + * request goes into an error state concurrent with the request. The + * {@link HALoadBalancerServlet} should be robust even when the + * {@link HAQuorumService} associated with the {@link HAJournalServer} is + * not running. We do not want to be unable to proxy to another service + * just because this one is going through an error state. Would it make + * more sense to have a 2nd Quorum object for this purpose - one that is + * not started and stopped by the HAJournalServer? + * + * FIXME Verify that the bigdata remote service call works when the client + * is a bigdata HA replication cluster. This should work just fine if the + * sparqlEndpointURL is <code>.../bigdata/LBS/sparql</code>. 
+ * + * FIXME Verify correct load balancing when addressing a non-default + * namespace (.../bigdata/LBS/namespace/NAMESPACE/sparql). + */ +abstract public class AbstractHA3LoadBalancerTestCase extends + AbstractHA3JournalServerTestCase { + + /** + * {@inheritDoc} + * <p> + * Note: This overrides some {@link Configuration} values for the + * {@link HAJournalServer} in order to establish conditions suitable for + * testing the {@link ISnapshotPolicy} and {@link IRestorePolicy}. + */ + @Override + protected String[] getOverrides() { + + return new String[]{ +// "com.bigdata.journal.HAJournal.properties=" +TestHA3JournalServer.getTestHAJournalProperties(com.bigdata.journal.HAJournal.properties), + "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", + "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", +// "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"", +// "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", +// /* +// * Setup configuration that supports the LBS. Note: This does not work. We would need to override (replace) HAJournal.properties to do this. +// */ +// "com.bigdata.journal.PlatformStatsPlugIn.Options.COLLECT_PLATFORM_STATISTICS=true", +// "com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN=true", +// "com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT=true", +// "com.bigdata.journal.GangliaPlugIn.Options.REPORT_DELAY=2000", // NB: short delay is used to develop the HALBS. +// "com.bigdata.rdf.sail.webapp.HALoadBalancerServlet.policy=com.bigdata.rdf.sail.webapp.HALoadBalancerServlet.NOPLBSPolicy" + }; + + } + + public AbstractHA3LoadBalancerTestCase() { + } + + public AbstractHA3LoadBalancerTestCase(final String name) { + + super(name); + + } + + protected void setPolicy(final IHALoadBalancerPolicy policy, + final HAGlue... services) throws IOException { + + for(HAGlue service : services) { + + ((HAGlueTest) service).setHALoadBalancerPolicy(policy); + + } + + } + + /** + * Return a {@link Serializable} instance of the + * {@link IHALoadBalancerPolicy} to be tested. + * <p> + * Note: The {@link ServletContext} on the target server will govern + * the initialization of the policy. + * + * @see IHALoadBalancerPolicy#init(ServletConfig, IIndexManager) + */ + abstract protected IHALoadBalancerPolicy newTestPolicy(); + + /** + * Simple tests with the load balancer enabled. This test verifies that we + * can issue both read and write requests to the load balancer. Since read + * requests are silently forwarded to the local service if the load balancer + * is disabled, this is really only giving us good information about whether + * or not the update requests have been proxied. + */ + public void test_HA3LoadBalancer_01() throws Exception { + + final ABC abc = new ABC(true/* sequential */); + + final HAGlue serverA = abc.serverA, serverB = abc.serverB, serverC = abc.serverC; + + final HAGlue[] services = new HAGlue[] { serverA, serverB, serverC }; + + // Verify quorum is FULLY met. + awaitFullyMetQuorum(); + + // await the KB create commit point to become visible on each service. + awaitCommitCounter(1L, services); + + // Verify binary equality of ALL journals. + assertDigestsEquals(services); + + // Impose the desired LBS policy. + setPolicy(newTestPolicy(), services); + + // Repositories without the LBS. 
+ final RemoteRepository[] repos = new RemoteRepository[3]; + repos[0] = getRemoteRepository(serverA); + repos[1] = getRemoteRepository(serverB); + repos[2] = getRemoteRepository(serverC); + + /* + * Verify that query on all nodes is allowed. + */ + for (RemoteRepository r : repos) { + + // Should be empty. + assertEquals(0L, + countResults(r.prepareTupleQuery("SELECT * {?a ?b ?c}") + .evaluate())); + + } + + final RemoteRepository[] reposLBS = new RemoteRepository[3]; + reposLBS[0] = getRemoteRepository(serverA, true/* useLBS */); + reposLBS[1] = getRemoteRepository(serverB, true/* useLBS */); + reposLBS[2] = getRemoteRepository(serverC, true/* useLBS */); + + /* + * Verify that query on all nodes is allowed using the LBS. + */ + for (RemoteRepository r : reposLBS) { + + // Should be empty. + assertEquals(0L, + countResults(r.prepareTupleQuery("SELECT * {?a ?b ?c}") + .evaluate())); + + } + + /* + * Send an update request to the LBS on each service. The request should + * be proxied to the leader if it is directed to a follower. This is how + * we know whether the LBS is active or not. If it is not active, then + * the follower will refuse to process the update. + */ + + simpleTransaction_noQuorumCheck(serverA, true/* useLBS */); // leader. + simpleTransaction_noQuorumCheck(serverB, true/* useLBS */); // follower. + simpleTransaction_noQuorumCheck(serverC, true/* useLBS */); // follower. + + // await the KB create commit point to become visible on each service. + awaitCommitCounter(4L, new HAGlue[] { serverA, serverB, serverC }); + + // Verify binary equality of ALL journals. + assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + + } + + /** + * Test of DELETE on the leader. + */ + public void test_delete_leader() throws Exception { + + final ABC abc = new ABC(true/* sequential */); + + final HAGlue serverA = abc.serverA, serverB = abc.serverB, serverC = abc.serverC; + + // The expected HStatus for each of the services (A,B,C). + final HAStatusEnum[] expectedHAStatusArray = new HAStatusEnum[] { // + HAStatusEnum.Leader, // + HAStatusEnum.Follower,// + HAStatusEnum.Follower // + }; + + // The services in their join order. + final HAGlue[] services = new HAGlue[] { serverA, serverB, serverC }; + + // Verify quorum is FULLY met. + awaitFullyMetQuorum(); + + // await the KB create commit point to become visible on each service. + awaitCommitCounter(1L, services); + + // Verify binary equality of ALL journals. + assertDigestsEquals(services); + + // Verify the HAStatus of each service. + awaitHAStatus(expectedHAStatusArray, services); + + // Impose the desired LBS policy. + setPolicy(newTestPolicy(), services); + + // Repositories without the LBS. + final RemoteRepository[] repos = new RemoteRepository[3]; + repos[0] = getRemoteRepository(serverA); + repos[1] = getRemoteRepository(serverB); + repos[2] = getRemoteRepository(serverC); + + // Repositories with the LBS. + final RemoteRepository[] reposLBS = new RemoteRepository[3]; + reposLBS[0] = getRemoteRepository(serverA, true/* useLBS */); + reposLBS[1] = getRemoteRepository(serverB, true/* useLBS */); + reposLBS[2] = getRemoteRepository(serverC, true/* useLBS */); + + // Add some data using leader. + simpleTransaction_noQuorumCheck(serverA, false/* useLBS */); // leader. + + // Should be non-zero. + assertNotSame(0L, getCountStar(serverA, false/* useLBS */)); + + // delete everything using leader. + reposLBS[0].remove(new RemoveOp(null, null, null)); + + // Verify everything is gone on the leader. 
+ awaitHAStatus(serverA, HAStatusEnum.Leader); + + // Should be zero. + assertEquals(0L, getCountStar(serverA, false/* useLBS */)); + + // await the commit point to become visible on each service. + awaitCommitCounter(3L, services); + + // Verify binary equality of ALL journals. + assertDigestsEquals(services); + + } + + /** + * Test of DELETE on the follower. + */ + public void test_delete_follower() throws Exception { + + final ABC abc = new ABC(true/* sequential */); + + final HAGlue serverA = abc.serverA, serverB = abc.serverB, serverC = abc.serverC; + + // The expected HStatus for each of the services (A,B,C). + final HAStatusEnum[] expectedHAStatusArray = new HAStatusEnum[] { // + HAStatusEnum.Leader, // + HAStatusEnum.Follower,// + HAStatusEnum.Follower // + }; + + // The services in their join order. + final HAGlue[] services = new HAGlue[] { serverA, serverB, serverC }; + + // Verify quorum is FULLY met. + awaitFullyMetQuorum(); + + // await the KB create commit point to become visible on each service. + awaitCommitCounter(1L, services); + + // Verify binary equality of ALL journals. + assertDigestsEquals(services); + + // Verify the HAStatus of each service. + awaitHAStatus(expectedHAStatusArray, services); + + // Impose the desired LBS policy. + setPolicy(newTestPolicy(), services); + + // Repositories without the LBS. + final RemoteRepository[] repos = new RemoteRepository[3]; + repos[0] = getRemoteRepository(serverA); + repos[1] = getRemoteRepository(serverB); + repos[2] = getRemoteRepository(serverC); + + // Repositories with the LBS. + final RemoteRepository[] reposLBS = new RemoteRepository[3]; + reposLBS[0] = getRemoteRepository(serverA, true/* useLBS */); + reposLBS[1] = getRemoteRepository(serverB, true/* useLBS */); + reposLBS[2] = getRemoteRepository(serverC, true/* useLBS */); + + // Add some data using leader. + simpleTransaction_noQuorumCheck(serverA, false/* useLBS */); // leader. + + // Should be non-zero. + assertNotSame(0L, getCountStar(serverA, false/* useLBS */)); + + // delete everything using 1st follower + reposLBS[1].remove(new RemoveOp(null, null, null)); + + // Verify everything is gone on the leader. + awaitHAStatus(serverA, HAStatusEnum.Leader); + + // Should be zero. + assertEquals(0L, getCountStar(serverA, false/* useLBS */)); + + // await the commit point to become visible on each service. + awaitCommitCounter(3L, services); + + // Verify binary equality of ALL journals. + assertDigestsEquals(services); + + } + + /** + * Test of a simple sequence of events that I often test through a web + * browser. This verifies that we can read on A and B using + * + * <pre> + * SELECT COUNT (*) + * </pre> + * + * and that we can write on A and B using + * + * <pre> + * PREFIX dc: <http://purl.org/dc/elements/1.1/> + * INSERT DATA + * { + * <http://example/book1> dc:title "A new book" ; + * dc:creator "A.N.Other" . + * } + * </pre> + * + * and + * + * <pre> + * PREFIX dc: <http://purl.org/dc/elements/1.1/> + * DELETE DATA + * { + * <http://example/book1> dc:title "A new book" ; + * dc:creator "A.N.Other" . + * } + * </pre> + */ + public void test_simple_sequence() throws Exception { + + final ABC abc = new ABC(true/* sequential */); + + final HAGlue serverA = abc.serverA, serverB = abc.serverB, serverC = abc.serverC; + + // The expected HStatus for each of the services (A,B,C). 
+ final HAStatusEnum[] expectedHAStatusArray = new HAStatusEnum[] { // + HAStatusEnum.Leader, // + HAStatusEnum.Follower,// + HAStatusEnum.Follower // + }; + + // The services in their join order. + final HAGlue[] services = new HAGlue[] { serverA, serverB, serverC }; + + // Verify quorum is FULLY met. + awaitFullyMetQuorum(); + + // await the KB create commit point to become visible on each service. + awaitCommitCounter(1L, services); + + // Verify binary equality of ALL journals. + assertDigestsEquals(services); + + // Verify the HAStatus of each service. + awaitHAStatus(expectedHAStatusArray, services); + + // Impose the desired LBS policy. + setPolicy(newTestPolicy(), services); + + // Repositories without the LBS. + final RemoteRepository[] repos = new RemoteRepository[3]; + repos[0] = getRemoteRepository(serverA); + repos[1] = getRemoteRepository(serverB); + repos[2] = getRemoteRepository(serverC); + + // Repositories with the LBS. + final RemoteRepository[] reposLBS = new RemoteRepository[3]; + reposLBS[0] = getRemoteRepository(serverA, true/* useLBS */); + reposLBS[1] = getRemoteRepository(serverB, true/* useLBS */); + reposLBS[2] = getRemoteRepository(serverC, true/* useLBS */); + + /* + * Verify read on each service. + */ + assertEquals(0L, getCountStar(serverA)); + assertEquals(0L, getCountStar(serverB)); + assertEquals(0L, getCountStar(serverC)); + + /* + * Verify insert on A, read back on all. + */ + simpleInsertTransaction_noQuorumCheck(serverA, true/* useLoadBalancer */); + + assertEquals(2L, getCountStar(serverA)); + assertEquals(2L, getCountStar(serverB)); + assertEquals(2L, getCountStar(serverC)); + + /* + * Verify delete on B, read back on all. + */ + simpleDeleteTransaction_noQuorumCheck(serverB, true/* useLoadBalancer */); + + assertEquals(0L, getCountStar(serverA)); + assertEquals(0L, getCountStar(serverB)); + assertEquals(0L, getCountStar(serverC)); + + } + + protected void simpleInsertTransaction_noQuorumCheck(final HAGlue haGlue, + final boolean useLoadBalancer) throws IOException, Exception { + + final StringBuilder sb = new StringBuilder(); + sb.append("PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"); + sb.append("INSERT DATA {\n"); + sb.append(" <http://example/book1> dc:title \"A new book\" ;\n"); + sb.append(" dc:creator \"A.N.Other\" .\n"); + sb.append("}\n"); + + final String updateStr = sb.toString(); + + final RemoteRepository repo = getRemoteRepository(haGlue, + useLoadBalancer); + + repo.prepareUpdate(updateStr).evaluate(); + + } + + protected void simpleDeleteTransaction_noQuorumCheck(final HAGlue haGlue, + final boolean useLoadBalancer) throws IOException, Exception { + + final StringBuilder sb = new StringBuilder(); + sb.append("PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"); + sb.append("DELETE DATA {\n"); + sb.append(" <http://example/book1> dc:title \"A new book\" ;\n"); + sb.append(" dc:creator \"A.N.Other\" .\n"); + sb.append("}\n"); + + final String updateStr = sb.toString(); + + final RemoteRepository repo = getRemoteRepository(haGlue, + useLoadBalancer); + + repo.prepareUpdate(updateStr).evaluate(); + + } + +} Modified: branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java =================================================================== --- branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-30 
18:49:20 UTC (rev 8157) @@ -550,29 +550,51 @@ protected RemoteRepository getRemoteRepository(final HAGlue haGlue) throws IOException { + return getRemoteRepository(haGlue, false/* useLoadBalancer */); + + } + + /** + * Return a {@link RemoteRepository} for talking to the + * {@link NanoSparqlServer} instance associated with an {@link HAGlue} + * interface. + * + * @param haGlue + * The service. + * @param useLoadBalancer + * when <code>true</code> the URL will be the load balancer on + * that service and the request MAY be redirected to another + * service. + * + * @throws IOException + */ + protected RemoteRepository getRemoteRepository(final HAGlue haGlue, + final boolean useLoadBalancer) throws IOException { + final String sparqlEndpointURL = getNanoSparqlServerURL(haGlue) +// + (useLoadBalancer ? "/LBS" : "") + "/sparql"; - + // Client for talking to the NSS. final HttpClient httpClient = new DefaultHttpClient(ccm); final RemoteRepository repo = new RemoteRepository(sparqlEndpointURL, - httpClient, executorService); + useLoadBalancer, httpClient, executorService); return repo; } - protected RemoteRepositoryManager getRemoteRepositoryManager(final HAGlue haGlue) - throws IOException { + protected RemoteRepositoryManager getRemoteRepositoryManager( + final HAGlue haGlue, final boolean useLBS) throws IOException { final String endpointURL = getNanoSparqlServerURL(haGlue); // Client for talking to the NSS. final HttpClient httpClient = new DefaultHttpClient(ccm); - final RemoteRepositoryManager repo = new RemoteRepositoryManager(endpointURL, - httpClient, executorService); + final RemoteRepositoryManager repo = new RemoteRepositoryManager( + endpointURL, useLBS, httpClient, executorService); return repo; @@ -592,17 +614,22 @@ protected long countResults(final TupleQueryResult result) throws Exception { long count = 0; + try { - while (result.hasNext()) { + while (result.hasNext()) { - result.next(); + result.next(); - count++; + count++; + } + + } finally { + + result.close(); + } - result.close(); - return count; } @@ -613,26 +640,47 @@ * * @param haGlue * The service. + * * @return The value reported by COUNT(*). + * * @throws Exception * @throws IOException */ protected long getCountStar(final HAGlue haGlue) throws IOException, Exception { - return new CountStarTask(haGlue).call(); + return getCountStar(haGlue, false/* useLBS */); } /** + * Report COUNT(*) for the default SPARQL end point for an {@link HAGlue} + * instance. + * + * @param haGlue + * The service. + * @param useLBS + * <code>true</code> iff the load balancer end point should be + * used for the request. + * + * @return The value reported by COUNT(*). + * + * @throws Exception + * @throws IOException + */ + protected long getCountStar(final HAGlue haGlue, final boolean useLBS) + throws IOException, Exception { + + return new CountStarTask(haGlue, useLBS).call(); + + } + + /** * Task reports COUNT(*) for the default SPARQL end point for an * {@link HAGlue} instance. */ protected class CountStarTask implements Callable<Long> { -// /** The service to query. */ -// private final HAGlue haGlue; - /** * The SPARQL end point for that service. */ @@ -647,17 +695,19 @@ /** * @param haGlue * The service to query. - * - * @throws IOException + * @param useLBS + * <code>true</code> iff the load balanced end point should + * be used. 
+ * + * @throws IOException */ - public CountStarTask(final HAGlue haGlue) throws IOException { - -// this.haGlue = haGlue; - + public CountStarTask(final HAGlue haGlue, final boolean useLBS) + throws IOException { + /* * Run query against one of the services. */ - remoteRepo = getRemoteRepository(haGlue); + remoteRepo = getRemoteRepository(haGlue, useLBS); } @@ -665,6 +715,7 @@ * Return the #of triples reported by <code>COUNT(*)</code> for * the SPARQL end point. */ + @Override public Long call() throws Exception { final String query = "SELECT (COUNT(*) AS ?count) WHERE { ?s ?p ?o }"; @@ -678,10 +729,11 @@ // done. final Value v = bs.getBinding("count").getValue(); - return (long) ((org.openrdf.model.Literal) v).intValue(); + return ((org.openrdf.model.Literal) v).longValue(); + } - }; + } /** * Wait until the KB exists. Modified: branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config =================================================================== --- branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-04-30 18:49:20 UTC (rev 8157) @@ -281,6 +281,9 @@ new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), + new NV(com.bigdata.journal.PlatformStatsPlugIn.Options.COLLECT_PLATFORM_STATISTICS,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), + }, bigdata.kb); } Modified: branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config =================================================================== --- branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-04-30 18:49:20 UTC (rev 8157) @@ -280,6 +280,9 @@ new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), + new NV(com.bigdata.journal.PlatformStatsPlugIn.Options.COLLECT_PLATFORM_STATISTICS,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), + }, bigdata.kb); } Modified: branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config =================================================================== --- branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-04-30 18:49:20 UTC (rev 8157) @@ -280,6 +280,9 @@ new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), + new NV(com.bigdata.journal.PlatformStatsPlugIn.Options.COLLECT_PLATFORM_STATISTICS,"true"), + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), + }, bigdata.kb); } Modified: branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java =================================================================== --- branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2014-04-30 18:49:20 UTC (rev 8157) @@ -101,6 +101,8 @@ import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.sail.BigdataSailRepository; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; +import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet; +import com.bigdata.rdf.sail.webapp.IHALoadBalancerPolicy; 
import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.service.jini.RemoteDestroyAdmin; @@ -357,6 +359,19 @@ * Variant that does not clear out the last root cause. */ public Throwable getLastRootCause() throws IOException; + + /** + * Set the {@link IHALoadBalancerPolicy} for each active instance of the + * {@link HALoadBalancerServlet}. + * + * @param policy + * The policy. + * + * @throws IOException + */ + public void setHALoadBalancerPolicy(final IHALoadBalancerPolicy policy) + throws IOException; + } /** @@ -830,7 +845,7 @@ } @Override - public long awaitHAReady(long timeout, TimeUnit unit) + public long awaitHAReady(final long timeout, final TimeUnit unit) throws InterruptedException, TimeoutException, AsynchronousQuorumCloseException { @@ -842,7 +857,7 @@ } @Override - public IHARootBlockResponse getRootBlock(IHARootBlockRequest msg) { + public IHARootBlockResponse getRootBlock(final IHARootBlockRequest msg) { checkMethod("getRootBlock", new Class[] { IHARootBlockRequest.class }); @@ -886,7 +901,7 @@ } @Override - public IHADigestResponse computeDigest(IHADigestRequest req) + public IHADigestResponse computeDigest(final IHADigestRequest req) throws IOException, NoSuchAlgorithmException, DigestException { checkMethod("computeDigest", new Class[] { IHADigestRequest.class }); @@ -896,8 +911,9 @@ } @Override - public IHALogDigestResponse computeHALogDigest(IHALogDigestRequest req) - throws IOException, NoSuchAlgorithmException, DigestException { + public IHALogDigestResponse computeHALogDigest( + final IHALogDigestRequest req) throws IOException, + NoSuchAlgorithmException, DigestException { checkMethod("computeHALogDigest", new Class[] { IHALogDigestRequest.class }); @@ -908,7 +924,7 @@ @Override public IHASnapshotDigestResponse computeHASnapshotDigest( - IHASnapshotDigestRequest req) throws IOException, + final IHASnapshotDigestRequest req) throws IOException, NoSuchAlgorithmException, DigestException { checkMethod("computeHASnapshotDigest", @@ -919,8 +935,8 @@ } @Override - public Future<IHASnapshotResponse> takeSnapshot(IHASnapshotRequest req) - throws IOException { + public Future<IHASnapshotResponse> takeSnapshot( + final IHASnapshotRequest req) throws IOException { checkMethod("takeSnapshot", new Class[] { IHASnapshotRequest.class }); @@ -930,7 +946,8 @@ } @Override - public Future<Void> rebuildFromLeader(IHARemoteRebuildRequest req) throws IOException { + public Future<Void> rebuildFromLeader(final IHARemoteRebuildRequest req) + throws IOException { checkMethod("restoreFromLeader", new Class[] { IHARemoteRebuildRequest.class }); @@ -956,7 +973,7 @@ @Override public void gatherMinimumVisibleCommitTime( - IHAGatherReleaseTimeRequest req) throws IOException { + final IHAGatherReleaseTimeRequest req) throws IOException { checkMethod("gatherMinimumVisibleCommitTime", new Class[] { IHAGatherReleaseTimeRequest.class }); @@ -967,7 +984,7 @@ @Override public IHANotifyReleaseTimeResponse notifyEarliestCommitTime( - IHANotifyReleaseTimeRequest req) throws IOException, + final IHANotifyReleaseTimeRequest req) throws IOException, InterruptedException, BrokenBarrierException { checkMethod("notifyEarliestCommitTime", @@ -1070,7 +1087,7 @@ } @Override - public Future<Void> abort2Phase(IHA2PhaseAbortMessage abortMessage) { + public Future<Void> abort2Phase(final IHA2PhaseAbortMessage abortMessage) { checkMethod("abort2Phase", new Class[] { IHA2PhaseAbortMessage.class }); @@ -1137,7 +1154,7 @@ @Override public IHAWriteSetStateResponse getHAWriteSetState( - IHAWriteSetStateRequest 
req) { + final IHAWriteSetStateRequest req) { checkMethod("getHAWriteSetState", new Class[] { IHAWriteSetStateRequest.class }); @@ -1148,7 +1165,7 @@ @Override public IHALogRootBlocksResponse getHALogRootBlocksForWriteSet( - IHALogRootBlocksRequest msg) throws IOException { + final IHALogRootBlocksRequest msg) throws IOException { checkMethod("getHALogRootBlocksForWriteSet", new Class[] { IHALogRootBlocksRequest.class }); @@ -1158,7 +1175,7 @@ } @Override - public Future<Void> sendHALogForWriteSet(IHALogRequest msg) + public Future<Void> sendHALogForWriteSet(final IHALogRequest msg) throws IOException { checkMethod("sendHALogForWriteSet", @@ -1169,7 +1186,7 @@ } @Override - public Future<IHASendStoreResponse> sendHAStore(IHARebuildRequest msg) + public Future<IHASendStoreResponse> sendHAStore(final IHARebuildRequest msg) throws IOException { checkMethod("sendHAStore", new Class[] { IHARebuildRequest.class }); @@ -1426,6 +1443,20 @@ * @see HAGlueTest#getAndClearLastRootCause() */ private AtomicReference<Throwable> lastRootCause = new AtomicReference<Throwable>(); + + @Override + public void setHALoadBalancerPolicy(final IHALoadBalancerPolicy policy) + throws IOException { + + if (policy == null) + throw new IllegalArgumentException(); + + if (log.isInfoEnabled()) + log.info("Will set LBS policy: " + policy); + + getHAJournalServer().setHALoadBalancerPolicy(policy); + + } } // class HAGlueTestImpl Modified: branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java =================================================================== --- branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2014-04-30 18:39:03 UTC (rev 8156) +++ branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2014-04-30 18:49:20 UTC (rev 8157) @@ -74,11 +74,6 @@ // Basic tests for a single HAJournalServer (quorum does not meet) suite.addTestSuite(TestHAJournalServer.class); - // HA1 test suite. - suite.addTestSuite(TestHA1JournalServer.class); - suite.addTestSuite(TestHA1SnapshotPolicy.class); - ... [truncated message content] |
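The r8157 changes above add a useLoadBalancer flag to the RemoteRepository client so that requests are addressed to /bigdata/LBS/... and updates are proxied to the quorum leader when issued against a follower. The sketch below is based on the constructor and the prepareTupleQuery/prepareUpdate calls visible in the diff; the endpoint URL, the no-argument DefaultHttpClient, and the thread pool setup are assumptions, not the test harness's actual wiring.
{{{
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.http.client.HttpClient;
import org.apache.http.impl.client.DefaultHttpClient;
import org.openrdf.query.TupleQueryResult;

import com.bigdata.rdf.sail.webapp.client.RemoteRepository;

public class LbsRemoteRepositorySketch {

    public static void main(final String[] args) throws Exception {

        // Assumed SPARQL end point of one service in the HA replication cluster.
        final String sparqlEndpointURL = "http://localhost:8080/bigdata/sparql";

        final HttpClient httpClient = new DefaultHttpClient();
        final ExecutorService executorService = Executors.newCachedThreadPool();

        try {

            // useLoadBalancer=true: requests go through the /bigdata/LBS/... servlet.
            final RemoteRepository repo = new RemoteRepository(sparqlEndpointURL,
                    true/* useLoadBalancer */, httpClient, executorService);

            // Read request: may be load balanced across the joined services.
            final TupleQueryResult result = repo.prepareTupleQuery(
                    "SELECT * { ?s ?p ?o } LIMIT 1").evaluate();
            try {
                while (result.hasNext()) {
                    result.next();
                }
            } finally {
                result.close();
            }

            // Update request: proxied to the quorum leader if this service is a follower.
            repo.prepareUpdate(
                    "INSERT DATA { <http://example/book1> <http://example/p> \"o\" . }")
                    .evaluate();

        } finally {

            executorService.shutdownNow();

        }

    }

}
}}}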
From: <tho...@us...> - 2014-04-30 18:39:06
Revision: 8156 http://sourceforge.net/p/bigdata/code/8156 Author: thompsonbry Date: 2014-04-30 18:39:03 +0000 (Wed, 30 Apr 2014) Log Message: ----------- Updated logger levels for development (commented out). Modified Paths: -------------- branches/RDR/bigdata/src/resources/logging/log4j-dev.properties Modified: branches/RDR/bigdata/src/resources/logging/log4j-dev.properties =================================================================== --- branches/RDR/bigdata/src/resources/logging/log4j-dev.properties 2014-04-30 18:34:41 UTC (rev 8155) +++ branches/RDR/bigdata/src/resources/logging/log4j-dev.properties 2014-04-30 18:39:03 UTC (rev 8156) @@ -12,6 +12,21 @@ log4j.logger.com.bigdata.btree=WARN +# webapp logging. +#log4j.logger.com.bigdata.rdf.sail.webapp=ALL +#log4j.logger.com.bigdata.rdf.sail.webapp.RESTServlet=INFO +#log4j.logger.com.bigdata.rdf.sail.webapp.HALoadBalancerServlet=ALL +#log4j.logger.com.bigdata.ganglia.GangliaService=INFO + +# jetty debug logging. +#log4j.logger.org.eclipse.jetty=INFO +#log4j.logger.org.eclipse.jetty.client=DEBUG +#log4j.logger.org.eclipse.jetty.proxy=DEBUG + +# RDF Graph Mining API +#log4j.logger.com.bigdata.rdf.graph=ALL +#log4j.logger.com.bigdata.rdf.graph.impl.bd.GASService=INFO + #log4j.logger.com.bigdata.htree=DEBUG #log4j.logger.com.bigdata.htree.TestHTreeWithMemStore=INFO @@ -46,8 +61,6 @@ #log4j.logger.com.bigdata.rdf.rio.ntriples.BigdataNTriplesParserTestCase=ALL #log4j.logger.com.bigdata.rdf.rio.StatementBuffer=ALL -#log4j.logger.com.bigdata.rdf.sail.webapp=ALL - # To see the masked type errors. #log4j.logger.com.bigdata.bop.solutions.TypeErrorLog=INFO This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-04-30 18:34:43
Revision: 8155 http://sourceforge.net/p/bigdata/code/8155 Author: thompsonbry Date: 2014-04-30 18:34:41 +0000 (Wed, 30 Apr 2014) Log Message: ----------- Replaced "UTF-8" with reference to a global constant for better tracking of the charset reference. Modified Paths: -------------- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-04-30 18:34:12 UTC (rev 8154) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-04-30 18:34:41 UTC (rev 8155) @@ -1997,7 +1997,7 @@ * XSL style sheet directive. */ mimeType = BigdataServlet.MIME_APPLICATION_XML; - charset = Charset.forName("UTF-8"); + charset = Charset.forName(BigdataRDFServlet.UTF8); fileExt = "xml"; } else { mimeType = format.getDefaultMIMEType(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
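The r8155 change replaces the string literal "UTF-8" with a shared constant so that charset references can be tracked from a single definition. A minimal illustration of the pattern follows; the constant here is a hypothetical stand-in for BigdataRDFServlet.UTF8, which is referenced but not shown in the patch.
{{{
import java.nio.charset.Charset;

public class CharsetConstantSketch {

    // Hypothetical stand-in for the BigdataRDFServlet.UTF8 constant in the patch.
    public static final String UTF8 = "UTF-8";

    public static void main(final String[] args) {

        // Before: charset = Charset.forName("UTF-8");
        // After: every usage routes through the shared constant.
        final Charset charset = Charset.forName(UTF8);

        System.out.println(charset.name());

    }

}
}}}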
From: <dme...@us...> - 2014-04-30 18:34:15
Revision: 8154 http://sourceforge.net/p/bigdata/code/8154 Author: dmekonnen Date: 2014-04-30 18:34:12 +0000 (Wed, 30 Apr 2014) Log Message: ----------- Backing out changes for the deploy-artifact-nss target. Modified Paths: -------------- branches/RDR/build.xml Modified: branches/RDR/build.xml =================================================================== --- branches/RDR/build.xml 2014-04-30 18:32:53 UTC (rev 8153) +++ branches/RDR/build.xml 2014-04-30 18:34:12 UTC (rev 8154) @@ -1215,21 +1215,6 @@ src="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer?printable=yes" /> - <!-- Stage files specific to NSS deployments provided by Brew and Chef. --> - <chmod file="${dist.bin}/bigdata" perm="755" /> - <copy file="${src.resources}/deployment/nss/bin/bigdata" - todir="${dist.bin}" /> - <chmod file="${dist.bin}/bigdata" perm="755" /> - <copy file="${src.resources}/deployment/nss/bin/startNSS" - todir="${dist.bin}" /> - <chmod file="${dist.bin}/startNSS" perm="755" /> - <copy file="${src.resources}/deployment/nss/etc/jetty.xml" - todir="${dist.var.jetty}/etc" /> - <copy file="${src.resources}/deployment/nss/WEB-INF/RWStore.properties" - todir="${dist.var.jetty}/WEB-INF" /> - <copy file="${src.resources}/deployment/nss/WEB-INF/classes/log4j.properties" - todir="${dist.var.jetty}/WEB-INF/classes" /> - </target> <!-- --> @@ -1309,40 +1294,7 @@ </target> - <target name="deploy-artifact-nss" depends="clean, stage" - description="Create compressed tar file for Jetty based deployment via Brew and Chef installers."> - <tar destfile="${bigdata.dir}/REL-NSS.${version}.tgz" - compression="gzip"> - - <tarfileset dir="${bigdata.dir}/dist"> - <include name="bigdata/doc/**" /> - <exclude name="bigdata/doc/HAJournalServer.html" /> - <include name="bigdata/lib/**" /> - <exclude name="bigdata/lib/bigdata-ganglia.jar" /> - <exclude name="bigdata/lib/browser.jar" /> - <exclude name="bigdata/lib/reggie.jar" /> - <exclude name="bigdata/lib/zookeeper.jar" /> - <exclude name="bigdata/lib/jsk-*.jar" /> - <exclude name="bigdata/lib-dl" /> - <exclude name="bigdata/lib-ext" /> - <include name="bigdata/var/jetty/**" /> - <include name="bigdata/var/config/logging/logging.properties" /> - <exclude name="bigdata/var/jetty/jetty.xml" /> - <exclude name="bigdata/var/jetty/html/new.html" /> - <exclude name="bigdata/var/jetty/html/old.html" /> - </tarfileset> - - <!-- Add scripts separately, making them executable --> - - <tarfileset dir="${bigdata.dir}/dist" filemode="755"> - <include name="bigdata/bin/bigdata" /> - <include name="bigdata/bin/startNSS" /> - </tarfileset> - </tar> - - </target> - <!-- FIXME DEBUG and add 'depends="javadoc, stage" (should stage stage javadoc?)' --> <!-- Note: can require 'rpm' and 'rpm-build. --> <!-- TODO: We do not need both this and "deploy-artifact". --> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-04-30 18:32:56
Revision: 8153 http://sourceforge.net/p/bigdata/code/8153 Author: thompsonbry Date: 2014-04-30 18:32:53 +0000 (Wed, 30 Apr 2014) Log Message: ----------- Exposed reference to the HAJournalServer for use by the HA LBS. Modified Paths: -------------- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java Modified: branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2014-04-30 18:32:23 UTC (rev 8152) +++ branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2014-04-30 18:32:53 UTC (rev 8153) @@ -268,7 +268,7 @@ * The {@link HAJournalServer} instance that is managing this * {@link HAJournal}. */ - protected HAJournalServer getHAJournalServer() { + public HAJournalServer getHAJournalServer() { return server; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
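Widening getHAJournalServer() from protected to public lets code outside the HAJournal class hierarchy (per the log message, the HA load balancer) navigate from a journal to the HAJournalServer that manages it. A hedged sketch of such a caller follows; the accessor itself is the only element taken from the commit, the surrounding class is hypothetical, and the imports assume both types live in com.bigdata.journal.jini.ha as the modified path suggests.
{{{
import com.bigdata.journal.jini.ha.HAJournal;
import com.bigdata.journal.jini.ha.HAJournalServer;

// Hypothetical caller illustrating why the accessor was made public in r8153:
// a component outside the HAJournal hierarchy (e.g. an HA LBS policy) can now
// reach the managing HAJournalServer. Only getHAJournalServer() is from the diff.
public class HAJournalServerLookup {

    public static HAJournalServer serverOf(final HAJournal journal) {
        // Prior to r8153 this call was visible only to subclasses or to
        // classes in the same package.
        return journal.getHAJournalServer();
    }
}
}}}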
From: <tho...@us...> - 2014-04-30 18:32:27
Revision: 8152 http://sourceforge.net/p/bigdata/code/8152 Author: thompsonbry Date: 2014-04-30 18:32:23 +0000 (Wed, 30 Apr 2014) Log Message: ----------- Added method to GangliaService for the caller to specify the hosts of interest. Marked the HostReportComparable as Serializable to support use cases where the IHALoadBalancerPolicy is set remotely (this is done in the HA LBS test suite). @see #624 (HA LBS) Modified Paths: -------------- branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/HostReportComparator.java Modified: branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java =================================================================== --- branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java 2014-04-30 17:52:58 UTC (rev 8151) +++ branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java 2014-04-30 18:32:23 UTC (rev 8152) @@ -1364,15 +1364,43 @@ */ public IHostReport[] getHostReport(final String[] reportOn, final Comparator<IHostReport> comparator) { + + final String[] hosts = gangliaState.getKnownHosts(); + return getHostReport(hosts, reportOn, comparator); + + } + + /** + * Return a host report based on the current state (similar to + * <code>gstat -a</code>). + * <p> + * Note: The report will not be accurate immediately as the + * {@link GangliaService} needs to build up a model of the current state of + * the monitored hosts. + * + * @param hosts + * The hosts for which host reports will be returned. + * @param reportOn + * The metrics to be reported for each host. The + * {@link IHostReport#getMetrics()} is an ordered map and will + * reflect the metrics in the order in which they are requested + * here. + * @param comparator + * The comparator used to order the {@link IHostReport}s. + * + * @return The {@link IHostReport}s for each specified host ordered by the + * given {@link Comparator}. + */ + public IHostReport[] getHostReport(final String[] hosts, + final String[] reportOn, final Comparator<IHostReport> comparator) { + if (reportOn == null || reportOn.length == 0) throw new IllegalArgumentException(); if (comparator == null) throw new IllegalArgumentException(); - final String[] hosts = gangliaState.getKnownHosts(); - final IHostReport[] a = new IHostReport[hosts.length]; for (int i = 0; i < a.length; i++) { Modified: branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/HostReportComparator.java =================================================================== --- branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/HostReportComparator.java 2014-04-30 17:52:58 UTC (rev 8151) +++ branches/RDR/bigdata-ganglia/src/java/com/bigdata/ganglia/HostReportComparator.java 2014-04-30 18:32:23 UTC (rev 8152) @@ -15,14 +15,21 @@ */ package com.bigdata.ganglia; +import java.io.Serializable; import java.util.Comparator; /** * Orders {@link IHostReport}s. */ -public class HostReportComparator implements Comparator<IHostReport> { +public class HostReportComparator implements Comparator<IHostReport>, + Serializable { - private final String metricName; + /** + * + */ + private static final long serialVersionUID = 1L; + + private final String metricName; private final boolean asc; /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
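The new getHostReport(hosts, reportOn, comparator) overload lets the caller restrict the report to an explicit set of hosts rather than everything the GangliaService currently knows about. A sketch of a call site follows; the host names, the "load_one" metric and the two-argument HostReportComparator constructor (metric name plus ascending flag, inferred from the fields shown in the diff) are assumptions for illustration, while the overload's signature itself is taken from the commit.
{{{
import java.util.Comparator;

import com.bigdata.ganglia.GangliaService;
import com.bigdata.ganglia.HostReportComparator;
import com.bigdata.ganglia.IHostReport;

// Sketch of the r8152 overload: the caller names the hosts of interest instead
// of relying on GangliaService's list of known hosts. Host names, the metric
// "load_one" and the comparator constructor arguments are illustrative guesses.
public class HostReportExample {

    public static IHostReport[] reportFor(final GangliaService ganglia) {

        final String[] hosts = { "bigdata-a.example.com", "bigdata-b.example.com" };

        final String[] reportOn = { "load_one" };

        // Assumed constructor: (metric name, ascending flag).
        final Comparator<IHostReport> comparator =
                new HostReportComparator("load_one", false/* asc */);

        return ganglia.getHostReport(hosts, reportOn, comparator);
    }
}
}}}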
From: <dme...@us...> - 2014-04-30 17:53:02
Revision: 8151 http://sourceforge.net/p/bigdata/code/8151 Author: dmekonnen Date: 2014-04-30 17:52:58 +0000 (Wed, 30 Apr 2014) Log Message: ----------- additional refinements following feedkback. Modified Paths: -------------- branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb branches/RDR/src/resources/deployment/nss/bin/bigdata Modified: branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb =================================================================== --- branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb 2014-04-30 15:45:05 UTC (rev 8150) +++ branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb 2014-04-30 17:52:58 UTC (rev 8151) @@ -1,59 +1,30 @@ -require 'formula' +require "formula" class Bigdata < Formula - homepage 'http://bigdata.com/blog/' - url 'http://bigdata.com/deploy/bigdata-1.3.0.tgz' - sha1 '5bfec0cfe47139dc0ab3ead7f61d5fc156b57bb9' + homepage "http://bigdata.com/blog/" + url "http://bigdata.com/deploy/bigdata-1.3.0.tgz" + sha1 "c22fa05df965019b3132161507ce0e77a4a1f6e2" def install - prefix.install Dir['*'] + prefix.install "doc" + prefix.install "var" + prefix.install "bin" + libexec.install "lib" - # make brew happy and rename the "lib" directory: - system "mv #{lib} #{libexec}" - # Set the installation path as the root for the bin scripts: - system "sed -i .bak 's|<%= BD_HOME %>|#{prefix}|' #{bin}/bigdata" - system "sed -i .bak 's|<%= INSTALL_TYPE %>|BREW|' #{bin}/bigdata ; rm #{bin}/bigdata.bak" + inreplace "#{bin}/bigdata", "<%= BD_HOME %>", prefix + inreplace "#{bin}/bigdata", "<%= INSTALL_TYPE %>", "BREW" - # Set the Jetty root as the resourceBase in the jetty.xml file: - system "sed -i .bak 's|<%= JETTY_DIR %>|#{prefix}/var/jetty|' #{prefix}/var/jetty/etc/jetty.xml ; rm #{prefix}/var/jetty/etc/jetty.xml.bak" + inreplace "#{prefix}/var/jetty/etc/jetty.xml", "<%= JETTY_DIR %>", "#{prefix}/var/jetty" # Set the installation path as the root for bigdata.jnl file location (<bigdata_home>/data): - system "sed -i .bak 's|<%= BD_HOME %>|#{prefix}|' #{prefix}/var/jetty/WEB-INF/RWStore.properties ; rm #{prefix}/var/jetty/WEB-INF/RWStore.properties.bak" + inreplace "#{prefix}/var/jetty/WEB-INF/RWStore.properties", "<%= BD_HOME %>", prefix # Set the installation path as the root for log files (<bigdata_home>/log): - system "sed -i .bak 's|<%= BD_HOME %>|#{prefix}|' #{prefix}/var/jetty/WEB-INF/classes/log4j.properties; rm #{prefix}/var/jetty/WEB-INF/classes/log4j.properties.bak " + inreplace "#{prefix}/var/jetty/WEB-INF/classes/log4j.properties", "<%= BD_HOME %>", prefix end - def caveats; <<-EOS.undent - After launching, visit the Bigdata Workbench at: - - http://localhost:8080/bigdata - - "bigdata" command synopis: - ------------------------- - - Start the server: - - % bigdata start - - Stop the server: - - % bigdata stop - - Restart the server: - - % bigdata restart - - To tune the server configuration, edit the "#{prefix}/var/jetty/WEB-INF/RWStore.properties" file. - - Further documentation: - - #{prefix}/doc - EOS - end - plist_options :startup => 'true', :manual => 'bigdata start' def plist; <<-EOS.undent Modified: branches/RDR/src/resources/deployment/nss/bin/bigdata =================================================================== --- branches/RDR/src/resources/deployment/nss/bin/bigdata 2014-04-30 15:45:05 UTC (rev 8150) +++ branches/RDR/src/resources/deployment/nss/bin/bigdata 2014-04-30 17:52:58 UTC (rev 8151) @@ -24,8 +24,8 @@ # # the following template line will be replaced by a deployer application (e.g. 
brew, chef) # -export INSTALL_TYPE="BREW" -export BD_HOME="/usr/local/Cellar/bigdata/1.3.0" +export INSTALL_TYPE="<%= INSTALL_TYPE %>" +export BD_HOME="<%= BD_HOME %>" pidFile=${BD_HOME}/var/lock/pid binDir=${BD_HOME}/bin @@ -101,6 +101,7 @@ # # Usage # + me=`basename $0` echo $"Usage: $0 {start|stop|status|restart}" exit 1 esac This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-04-30 15:45:09
Revision: 8150 http://sourceforge.net/p/bigdata/code/8150 Author: dmekonnen Date: 2014-04-30 15:45:05 +0000 (Wed, 30 Apr 2014) Log Message: ----------- Updates for to pass the brew audit. Modified Paths: -------------- branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb branches/RDR/src/resources/deployment/nss/bin/bigdata branches/RDR/src/resources/deployment/nss/bin/startNSS Modified: branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb =================================================================== --- branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb 2014-04-30 11:22:55 UTC (rev 8149) +++ branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb 2014-04-30 15:45:05 UTC (rev 8150) @@ -3,27 +3,31 @@ class Bigdata < Formula homepage 'http://bigdata.com/blog/' url 'http://bigdata.com/deploy/bigdata-1.3.0.tgz' - sha1 '466cbce9241e3d418a53a88d81b108f42f4e9f4a' + sha1 '5bfec0cfe47139dc0ab3ead7f61d5fc156b57bb9' def install prefix.install Dir['*'] + # make brew happy and rename the "lib" directory: + system "mv #{lib} #{libexec}" + # Set the installation path as the root for the bin scripts: - system "sed -i .bk 's|<%= BD_HOME %>|#{prefix}|' #{bin}/bigdata" + system "sed -i .bak 's|<%= BD_HOME %>|#{prefix}|' #{bin}/bigdata" + system "sed -i .bak 's|<%= INSTALL_TYPE %>|BREW|' #{bin}/bigdata ; rm #{bin}/bigdata.bak" - + # Set the Jetty root as the resourceBase in the jetty.xml file: - system "sed -i .bk 's|<%= JETTY_DIR %>|#{prefix}/var/jetty|' #{prefix}/var/jetty/etc/jetty.xml" + system "sed -i .bak 's|<%= JETTY_DIR %>|#{prefix}/var/jetty|' #{prefix}/var/jetty/etc/jetty.xml ; rm #{prefix}/var/jetty/etc/jetty.xml.bak" # Set the installation path as the root for bigdata.jnl file location (<bigdata_home>/data): - system "sed -i .bk 's|<%= BD_HOME %>|#{prefix}|' #{prefix}/var/jetty/WEB-INF/RWStore.properties" + system "sed -i .bak 's|<%= BD_HOME %>|#{prefix}|' #{prefix}/var/jetty/WEB-INF/RWStore.properties ; rm #{prefix}/var/jetty/WEB-INF/RWStore.properties.bak" # Set the installation path as the root for log files (<bigdata_home>/log): - system "sed -i .bk 's|<%= BD_HOME %>|#{prefix}|' #{prefix}/var/jetty/WEB-INF/classes/log4j.properties" + system "sed -i .bak 's|<%= BD_HOME %>|#{prefix}|' #{prefix}/var/jetty/WEB-INF/classes/log4j.properties; rm #{prefix}/var/jetty/WEB-INF/classes/log4j.properties.bak " end def caveats; <<-EOS.undent - After launching, visit the Bigdata Workbench at: + After launching, visit the Bigdata Workbench at: http://localhost:8080/bigdata @@ -45,7 +49,7 @@ To tune the server configuration, edit the "#{prefix}/var/jetty/WEB-INF/RWStore.properties" file. Further documentation: - + #{prefix}/doc EOS end Modified: branches/RDR/src/resources/deployment/nss/bin/bigdata =================================================================== --- branches/RDR/src/resources/deployment/nss/bin/bigdata 2014-04-30 11:22:55 UTC (rev 8149) +++ branches/RDR/src/resources/deployment/nss/bin/bigdata 2014-04-30 15:45:05 UTC (rev 8150) @@ -24,7 +24,8 @@ # # the following template line will be replaced by a deployer application (e.g. brew, chef) # -export BD_HOME="<%= BD_HOME %>" +export INSTALL_TYPE="BREW" +export BD_HOME="/usr/local/Cellar/bigdata/1.3.0" pidFile=${BD_HOME}/var/lock/pid binDir=${BD_HOME}/bin @@ -47,8 +48,9 @@ fi fi if [ ! -f "$pidFile" ]; then - echo $"`date` : `hostname` : bringing up services: " + echo -ne $"`date` : `hostname` : bringing bigdata services up ... " $binDir/startNSS + echo "done!" 
else echo $"`date` : `hostname` : running as $pid" fi @@ -65,9 +67,10 @@ echo $"`date` : `hostname` : $pid died?" rm -f "$pidFile" else - echo $"`date` : `hostname` : bringing down services: " + echo -ne $"`date` : `hostname` : bringing bigdata service down ... " kill $pid rm -f "$pidFile" + echo "done!" fi fi ;; Modified: branches/RDR/src/resources/deployment/nss/bin/startNSS =================================================================== --- branches/RDR/src/resources/deployment/nss/bin/startNSS 2014-04-30 11:22:55 UTC (rev 8149) +++ branches/RDR/src/resources/deployment/nss/bin/startNSS 2014-04-30 15:45:05 UTC (rev 8150) @@ -1,7 +1,11 @@ #!/bin/bash export INSTALL_DIR=${BD_HOME} -export LIB_DIR=${INSTALL_DIR}/lib +if [ $INSTALL_TYPE == "BREW" ]; then + export LIB_DIR=${INSTALL_DIR}/libexec +else + export LIB_DIR=${INSTALL_DIR}/lib +fi export JETTY_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'` export JETTY_DIR=${INSTALL_DIR}/var/jetty export CONFIG_DIR=${INSTALL_DIR}/var/config @@ -66,10 +70,8 @@ $NSS_PROPERTIES\ " -echo "Running: $cmd" +# echo "Running: $cmd" $cmd > /dev/null 2>&1 & -# $cmd& pid=$! -echo "PID=$pid" +# echo "PID=$pid" echo "$pid">$pidFile -exit This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-04-30 11:22:59
Revision: 8149 http://sourceforge.net/p/bigdata/code/8149 Author: dmekonnen Date: 2014-04-30 11:22:55 +0000 (Wed, 30 Apr 2014) Log Message: ----------- Updates to set installation paths. Modified Paths: -------------- branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb Modified: branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb =================================================================== --- branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb 2014-04-30 03:47:20 UTC (rev 8148) +++ branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb 2014-04-30 11:22:55 UTC (rev 8149) @@ -1,16 +1,25 @@ require 'formula' -# Documentation: https://github.com/mxcl/homebrew/wiki/Formula-Cookbook -# /usr/local/Library/Contributions/example-formula.rb -# PLEASE REMOVE ALL GENERATED COMMENTS BEFORE SUBMITTING YOUR PULL REQUEST! - class Bigdata < Formula homepage 'http://bigdata.com/blog/' url 'http://bigdata.com/deploy/bigdata-1.3.0.tgz' - sha1 'a395a243a2746ce47cf8893f2207fd2e0de4a9c1' + sha1 '466cbce9241e3d418a53a88d81b108f42f4e9f4a' def install prefix.install Dir['*'] + + # Set the installation path as the root for the bin scripts: + system "sed -i .bk 's|<%= BD_HOME %>|#{prefix}|' #{bin}/bigdata" + + + # Set the Jetty root as the resourceBase in the jetty.xml file: + system "sed -i .bk 's|<%= JETTY_DIR %>|#{prefix}/var/jetty|' #{prefix}/var/jetty/etc/jetty.xml" + + # Set the installation path as the root for bigdata.jnl file location (<bigdata_home>/data): + system "sed -i .bk 's|<%= BD_HOME %>|#{prefix}|' #{prefix}/var/jetty/WEB-INF/RWStore.properties" + + # Set the installation path as the root for log files (<bigdata_home>/log): + system "sed -i .bk 's|<%= BD_HOME %>|#{prefix}|' #{prefix}/var/jetty/WEB-INF/classes/log4j.properties" end def caveats; <<-EOS.undent @@ -33,11 +42,11 @@ % bigdata restart - To tune the server configuration, edit the "#{var}/jetty/WEB-INF/RWStore.properties" file. + To tune the server configuration, edit the "#{prefix}/var/jetty/WEB-INF/RWStore.properties" file. Further documentation: - #{doc} + #{prefix}/doc EOS end This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-04-30 03:47:24
Revision: 8148 http://sourceforge.net/p/bigdata/code/8148 Author: dmekonnen Date: 2014-04-30 03:47:20 +0000 (Wed, 30 Apr 2014) Log Message: ----------- jetty resource base path correction. "[!" becomes "[ !" in negation statements. Modified Paths: -------------- branches/RDR/src/resources/deployment/nss/bin/startNSS Modified: branches/RDR/src/resources/deployment/nss/bin/startNSS =================================================================== --- branches/RDR/src/resources/deployment/nss/bin/startNSS 2014-04-30 03:41:09 UTC (rev 8147) +++ branches/RDR/src/resources/deployment/nss/bin/startNSS 2014-04-30 03:47:20 UTC (rev 8148) @@ -11,12 +11,12 @@ export LOG_DIR=${BD_HOME}/var/log -if [! -d $LOG_DIR ]; then +if [ ! -d $LOG_DIR ]; then mkdir -p $LOG_DIR fi export DATA_DIR=${BD_HOME}/var/data -if [! -d $DATA_DIR ]; then +if [ ! -d $DATA_DIR ]; then mkdir -p $DATA_DIR fi @@ -33,7 +33,7 @@ export JETTY_XML="${JETTY_DIR}/etc/jetty.xml" fi if [ -z "${JETTY_RESOURCE_BASE}" ]; then - export JETTY_RESOURCE_BASE="${JETTY_DIR}/var/jetty" + export JETTY_RESOURCE_BASE="${JETTY_DIR}" fi @@ -52,7 +52,7 @@ # Setup the directory for the pid of the ServiceStarter process. lockDir=${INSTALL_DIR}/var/lock -if [! -d $lockDir ]; then +if [ ! -d $lockDir ]; then mkdir -p $lockDir fi pidFile=$lockDir/pid This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-04-30 03:41:13
Revision: 8147 http://sourceforge.net/p/bigdata/code/8147 Author: dmekonnen Date: 2014-04-30 03:41:09 +0000 (Wed, 30 Apr 2014) Log Message: ----------- added conditional directory creation Modified Paths: -------------- branches/RDR/src/resources/deployment/nss/bin/startNSS Modified: branches/RDR/src/resources/deployment/nss/bin/startNSS =================================================================== --- branches/RDR/src/resources/deployment/nss/bin/startNSS 2014-04-30 03:31:43 UTC (rev 8146) +++ branches/RDR/src/resources/deployment/nss/bin/startNSS 2014-04-30 03:41:09 UTC (rev 8147) @@ -8,10 +8,17 @@ export LOGGING_CONFIG=${CONFIG_DIR}/logging/logging.properties export LOG4J_CONFIG=${JETTY_DIR}/WEB-INF/classes/log4j.properties + + export LOG_DIR=${BD_HOME}/var/log -mkdir -p $LOG_DIR +if [! -d $LOG_DIR ]; then + mkdir -p $LOG_DIR +fi + export DATA_DIR=${BD_HOME}/var/data -mkdir -p $DATA_DIR +if [! -d $DATA_DIR ]; then + mkdir -p $DATA_DIR +fi export NSS="com.bigdata.rdf.sail.webapp.NanoSparqlServer" export NSS_NAMESPACE="kb" @@ -23,7 +30,7 @@ export JETTY_PORT="8080" fi if [ -z "${JETTY_XML}" ]; then - export JETTY_XML="${JETTY_DIR}/jetty.xml" + export JETTY_XML="${JETTY_DIR}/etc/jetty.xml" fi if [ -z "${JETTY_RESOURCE_BASE}" ]; then export JETTY_RESOURCE_BASE="${JETTY_DIR}/var/jetty" @@ -45,7 +52,9 @@ # Setup the directory for the pid of the ServiceStarter process. lockDir=${INSTALL_DIR}/var/lock -mkdir -p $lockDir +if [! -d $lockDir ]; then + mkdir -p $lockDir +fi pidFile=$lockDir/pid cmd="java ${JAVA_OPTS} \ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-04-30 03:31:54
Revision: 8146 http://sourceforge.net/p/bigdata/code/8146 Author: dmekonnen Date: 2014-04-30 03:31:43 +0000 (Wed, 30 Apr 2014) Log Message: ----------- Additions to build an NSS (only) package for Brew and Chef installers. Modified Paths: -------------- branches/RDR/build.xml Added Paths: ----------- branches/RDR/src/resources/deployment/ branches/RDR/src/resources/deployment/nss/ branches/RDR/src/resources/deployment/nss/WEB-INF/ branches/RDR/src/resources/deployment/nss/WEB-INF/RWStore.properties branches/RDR/src/resources/deployment/nss/WEB-INF/classes/ branches/RDR/src/resources/deployment/nss/WEB-INF/classes/log4j.properties branches/RDR/src/resources/deployment/nss/bin/ branches/RDR/src/resources/deployment/nss/bin/bigdata branches/RDR/src/resources/deployment/nss/bin/startNSS branches/RDR/src/resources/deployment/nss/etc/ branches/RDR/src/resources/deployment/nss/etc/jetty.xml Modified: branches/RDR/build.xml =================================================================== --- branches/RDR/build.xml 2014-04-29 23:37:02 UTC (rev 8145) +++ branches/RDR/build.xml 2014-04-30 03:31:43 UTC (rev 8146) @@ -1215,12 +1215,21 @@ src="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer?printable=yes" /> - <!-- Note: Commented out. This is breaking the RDR build. --> - <!-- Stage files specific to NSS deployments provided by Brew and Chef. - <copy file="${src.resources}/bin/bigdata" + <!-- Stage files specific to NSS deployments provided by Brew and Chef. --> + <chmod file="${dist.bin}/bigdata" perm="755" /> + <copy file="${src.resources}/deployment/nss/bin/bigdata" todir="${dist.bin}" /> <chmod file="${dist.bin}/bigdata" perm="755" /> ---> + <copy file="${src.resources}/deployment/nss/bin/startNSS" + todir="${dist.bin}" /> + <chmod file="${dist.bin}/startNSS" perm="755" /> + <copy file="${src.resources}/deployment/nss/etc/jetty.xml" + todir="${dist.var.jetty}/etc" /> + <copy file="${src.resources}/deployment/nss/WEB-INF/RWStore.properties" + todir="${dist.var.jetty}/WEB-INF" /> + <copy file="${src.resources}/deployment/nss/WEB-INF/classes/log4j.properties" + todir="${dist.var.jetty}/WEB-INF/classes" /> + </target> <!-- --> @@ -1318,12 +1327,17 @@ <exclude name="bigdata/lib-dl" /> <exclude name="bigdata/lib-ext" /> <include name="bigdata/var/jetty/**" /> + <include name="bigdata/var/config/logging/logging.properties" /> + <exclude name="bigdata/var/jetty/jetty.xml" /> + <exclude name="bigdata/var/jetty/html/new.html" /> + <exclude name="bigdata/var/jetty/html/old.html" /> </tarfileset> <!-- Add scripts separately, making them executable --> <tarfileset dir="${bigdata.dir}/dist" filemode="755"> <include name="bigdata/bin/bigdata" /> + <include name="bigdata/bin/startNSS" /> </tarfileset> </tar> Added: branches/RDR/src/resources/deployment/nss/WEB-INF/RWStore.properties =================================================================== --- branches/RDR/src/resources/deployment/nss/WEB-INF/RWStore.properties (rev 0) +++ branches/RDR/src/resources/deployment/nss/WEB-INF/RWStore.properties 2014-04-30 03:31:43 UTC (rev 8146) @@ -0,0 +1,40 @@ +# +# Note: These options are applied when the journal and the triple store are +# first created. + +## +## Journal options. +## + +# The backing file. This contains all your data. You want to put this someplace +# safe. The default locator will wind up in the directory from which you start +# your servlet container. +com.bigdata.journal.AbstractJournal.file=<%= BD_HOME %>/var/data/bigdata.jnl + +# The persistence engine. 
Use 'Disk' for the WORM or 'DiskRW' for the RWStore. +com.bigdata.journal.AbstractJournal.bufferMode=DiskRW + +# Setup for the RWStore recycler rather than session protection. +com.bigdata.service.AbstractTransactionService.minReleaseAge=1 + +com.bigdata.btree.writeRetentionQueue.capacity=4000 +com.bigdata.btree.BTree.branchingFactor=128 + +# 200M initial extent. +com.bigdata.journal.AbstractJournal.initialExtent=209715200 +com.bigdata.journal.AbstractJournal.maximumExtent=209715200 + +## +## Setup for QUADS mode without the full text index. +## +com.bigdata.rdf.sail.truthMaintenance=false +com.bigdata.rdf.store.AbstractTripleStore.quads=true +com.bigdata.rdf.store.AbstractTripleStore.statementIdentifiers=false +com.bigdata.rdf.store.AbstractTripleStore.textIndex=false +com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.NoAxioms + +# Bump up the branching factor for the lexicon indices on the default kb. +com.bigdata.namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor=400 + +# Bump up the branching factor for the statement indices on the default kb. +com.bigdata.namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor=1024 Added: branches/RDR/src/resources/deployment/nss/WEB-INF/classes/log4j.properties =================================================================== --- branches/RDR/src/resources/deployment/nss/WEB-INF/classes/log4j.properties (rev 0) +++ branches/RDR/src/resources/deployment/nss/WEB-INF/classes/log4j.properties 2014-04-30 03:31:43 UTC (rev 8146) @@ -0,0 +1,97 @@ +# Default log4j configuration. See the individual classes for the +# specific loggers, but generally they are named for the class in +# which they are defined. + +# Default log4j configuration for testing purposes. +# +# You probably want to set the default log level to ERROR. +# +log4j.rootCategory=WARN, file +#log4j.rootCategory=WARN, dest2 + +# Loggers. +# Note: logging here at INFO or DEBUG will significantly impact throughput! +log4j.logger.com.bigdata=WARN +log4j.logger.com.bigdata.btree=WARN +log4j.logger.com.bigdata.counters.History=ERROR +log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR +log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO +log4j.logger.com.bigdata.journal.CompactTask=INFO +log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR +log4j.logger.com.bigdata.rdf.load=INFO +log4j.logger.com.bigdata.rdf.store.DataLoader=INFO +log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO + +# file +log4j.appender.file=org.apache.log4j.RollingFileAppender +log4j.appender.file.File=<%= BD_HOME %>/var/log/bigdata.log +log4j.appender.file.MaxFileSize=4MB +log4j.appender.file.MaxBackupIndex=10 +log4j.appender.file.layout=org.apache.log4j.PatternLayout +log4j.appender.file.layout.ConversionPattern=%d{MMM dd, yyyy HH:mm:ss} %-5p: %F:%L: %m%n + + +# Normal data loader (single threaded). +#log4j.logger.com.bigdata.rdf.store.DataLoader=INFO + +# dest1 +log4j.appender.dest1=org.apache.log4j.ConsoleAppender +log4j.appender.dest1.layout=org.apache.log4j.PatternLayout +log4j.appender.dest1.layout.ConversionPattern=%-5p: %F:%L: %m%n +#log4j.appender.dest1.layout.ConversionPattern=%-5p: %r %l: %m%n +#log4j.appender.dest1.layout.ConversionPattern=%-5p: %m%n +#log4j.appender.dest1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n +#log4j.appender.dest1.layout.ConversionPattern=%-4r(%d) [%t] %-5p %c(%l:%M) %x - %m%n + +# dest2 includes the thread name and elapsed milliseconds. +# Note: %r is elapsed milliseconds. 
+# Note: %t is the thread name. +# See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html +log4j.appender.dest2=org.apache.log4j.ConsoleAppender +log4j.appender.dest2.layout=org.apache.log4j.PatternLayout +log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n + +## +# Rule execution log. This is a formatted log file (comma delimited). +log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog +log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false +log4j.appender.ruleLog=org.apache.log4j.FileAppender +log4j.appender.ruleLog.Threshold=ALL +log4j.appender.ruleLog.File=<%= BD_HOME %>/var/log/rules.log +log4j.appender.ruleLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. +log4j.appender.ruleLog.BufferedIO=false +log4j.appender.ruleLog.layout=org.apache.log4j.PatternLayout +log4j.appender.ruleLog.layout.ConversionPattern=%m + +## +# Summary query evaluation log (tab delimited file). Uncomment the next line to enable. +#log4j.logger.com.bigdata.bop.engine.QueryLog=INFO,queryLog +log4j.additivity.com.bigdata.bop.engine.QueryLog=false +log4j.appender.queryLog=org.apache.log4j.FileAppender +log4j.appender.queryLog.Threshold=ALL +log4j.appender.queryLog.File=<%= BD_HOME %>var/log/queryLog.csv +log4j.appender.queryLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. +log4j.appender.queryLog.BufferedIO=false +log4j.appender.queryLog.layout=org.apache.log4j.PatternLayout +log4j.appender.queryLog.layout.ConversionPattern=%m + +## +# BOp run state trace (tab delimited file). Uncomment the next line to enable. +#log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog +log4j.additivity.com.bigdata.bop.engine.RunState$TableLog=false +log4j.appender.queryRunStateLog=org.apache.log4j.FileAppender +log4j.appender.queryRunStateLog.Threshold=ALL +log4j.appender.queryRunStateLog.File=<%= BD_HOME %>/var/log/queryRunState.log +log4j.appender.queryRunStateLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. +log4j.appender.queryRunStateLog.BufferedIO=false +log4j.appender.queryRunStateLog.layout=org.apache.log4j.PatternLayout +log4j.appender.queryRunStateLog.layout.ConversionPattern=%m Added: branches/RDR/src/resources/deployment/nss/bin/bigdata =================================================================== --- branches/RDR/src/resources/deployment/nss/bin/bigdata (rev 0) +++ branches/RDR/src/resources/deployment/nss/bin/bigdata 2014-04-30 03:31:43 UTC (rev 8146) @@ -0,0 +1,105 @@ +#!/bin/bash + +# init.d style script for bigdata HA services. The script can be used +# to 'start' or 'stop' services. +# +# Environment: +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Misc. +# +# See http://tldp.org/LDP/abs/html/index.html +# +# Note: Blank lines are significant in shell scripts. +# +# Note: Children must do "exit 0" to indicate success. +# +# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix + +# Source function library (just used for 'action'). 
If you don't have this +# it SHOULD automatically use the inline definition for "action()". + +# +# the following template line will be replaced by a deployer application (e.g. brew, chef) +# +export BD_HOME="<%= BD_HOME %>" +pidFile=${BD_HOME}/var/lock/pid +binDir=${BD_HOME}/bin + + +# +# See how we were called. +# +case "$1" in + start) +# +# Start the ServiceStarter and child services if not running. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + fi + fi + if [ ! -f "$pidFile" ]; then + echo $"`date` : `hostname` : bringing up services: " + $binDir/startNSS + else + echo $"`date` : `hostname` : running as $pid" + fi + ;; + stop) +# +# Stop the ServiceStarter and all child services. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + else + echo $"`date` : `hostname` : bringing down services: " + kill $pid + rm -f "$pidFile" + fi + fi + ;; + status) +# +# Report status for the ServicesManager (up or down). +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then + echo $"`date` : `hostname` : process died? pid=$pid." + else + echo $"`date` : `hostname` : running as $pid." + fi + else + echo $"`date` : `hostname` : not running." + fi + ;; +# +# Simply stop then start. +# + restart) + $0 stop + $0 start + ;; + *) +# +# Usage +# + echo $"Usage: $0 {start|stop|status|restart}" + exit 1 +esac + +exit 0 Property changes on: branches/RDR/src/resources/deployment/nss/bin/bigdata ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: branches/RDR/src/resources/deployment/nss/bin/startNSS =================================================================== --- branches/RDR/src/resources/deployment/nss/bin/startNSS (rev 0) +++ branches/RDR/src/resources/deployment/nss/bin/startNSS 2014-04-30 03:31:43 UTC (rev 8146) @@ -0,0 +1,66 @@ +#!/bin/bash + +export INSTALL_DIR=${BD_HOME} +export LIB_DIR=${INSTALL_DIR}/lib +export JETTY_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'` +export JETTY_DIR=${INSTALL_DIR}/var/jetty +export CONFIG_DIR=${INSTALL_DIR}/var/config + +export LOGGING_CONFIG=${CONFIG_DIR}/logging/logging.properties +export LOG4J_CONFIG=${JETTY_DIR}/WEB-INF/classes/log4j.properties +export LOG_DIR=${BD_HOME}/var/log +mkdir -p $LOG_DIR +export DATA_DIR=${BD_HOME}/var/data +mkdir -p $DATA_DIR + +export NSS="com.bigdata.rdf.sail.webapp.NanoSparqlServer" +export NSS_NAMESPACE="kb" +export NSS_PROPERTIES=${JETTY_DIR}/WEB-INF/RWStore.properties + +export JVM_OPTS="-Djava.awt.headless=true -server -Xmx4G -XX:MaxDirectMemorySize=3000m -XX:+UseG1GC" + +if [ -z "${JETTY_PORT}" ]; then + export JETTY_PORT="8080" +fi +if [ -z "${JETTY_XML}" ]; then + export JETTY_XML="${JETTY_DIR}/jetty.xml" +fi +if [ -z "${JETTY_RESOURCE_BASE}" ]; then + export JETTY_RESOURCE_BASE="${JETTY_DIR}/var/jetty" +fi + + +export JETTY_OPTS="\ + -Djetty.port=${JETTY_PORT}\ + -Djetty.resourceBase=${JETTY_RESOURCE_BASE}\ + -DJETTY_XML=${JETTY_XML}\ + -Djava.util.logging.config.file=${LOGGING_CONFIG}\ + 
-Dlog4j.configuration=${LOG4J_CONFIG}\ +" + +export JAVA_OPTS="\ + ${JVM_OPTS}\ + ${JETTY_OPTS}\ +" + +# Setup the directory for the pid of the ServiceStarter process. +lockDir=${INSTALL_DIR}/var/lock +mkdir -p $lockDir +pidFile=$lockDir/pid + +cmd="java ${JAVA_OPTS} \ + -cp ${JETTY_CLASSPATH} \ + $NSS \ + -jettyXml ${JETTY_XML} \ + $JETTY_PORT \ + $NSS_NAMESPACE \ + $NSS_PROPERTIES\ +" + +echo "Running: $cmd" +$cmd > /dev/null 2>&1 & +# $cmd& +pid=$! +echo "PID=$pid" +echo "$pid">$pidFile +exit Property changes on: branches/RDR/src/resources/deployment/nss/bin/startNSS ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: branches/RDR/src/resources/deployment/nss/etc/jetty.xml =================================================================== --- branches/RDR/src/resources/deployment/nss/etc/jetty.xml (rev 0) +++ branches/RDR/src/resources/deployment/nss/etc/jetty.xml 2014-04-30 03:31:43 UTC (rev 8146) @@ -0,0 +1,133 @@ +<?xml version="1.0"?> +<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure.dtd"> +<!-- See http://www.eclipse.org/jetty/documentation/current/ --> +<!-- See http://wiki.eclipse.org/Jetty/Reference/jetty.xml_syntax --> +<Configure id="Server" class="org.eclipse.jetty.server.Server"> + + <!-- =========================================================== --> + <!-- Configure the Server Thread Pool. --> + <!-- The server holds a common thread pool which is used by --> + <!-- default as the executor used by all connectors and servlet --> + <!-- dispatches. --> + <!-- --> + <!-- Configuring a fixed thread pool is vital to controlling the --> + <!-- maximal memory footprint of the server and is a key tuning --> + <!-- parameter for tuning. In an application that rarely blocks --> + <!-- then maximal threads may be close to the number of 5*CPUs. --> + <!-- In an application that frequently blocks, then maximal --> + <!-- threads should be set as high as possible given the memory --> + <!-- available. --> + <!-- --> + <!-- Consult the javadoc of o.e.j.util.thread.QueuedThreadPool --> + <!-- for all configuration that may be set here. --> + <!-- =========================================================== --> + <!-- uncomment to change type of threadpool --> + <Arg name="threadpool"><New id="threadpool" class="org.eclipse.jetty.util.thread.QueuedThreadPool"/></Arg> + <!-- --> + <Get name="ThreadPool"> + <Set name="minThreads" type="int"><SystemProperty name="jetty.threads.min" default="10"/></Set> + <Set name="maxThreads" type="int"><SystemProperty name="jetty.threads.max" default="64"/></Set> + <Set name="idleTimeout" type="int"><SystemProperty name="jetty.threads.timeout" default="60000"/></Set> + <Set name="detailedDump">false</Set> + </Get> + + <!-- =========================================================== --> + <!-- Http Configuration. --> + <!-- This is a common configuration instance used by all --> + <!-- connectors that can carry HTTP semantics (HTTP, HTTPS, SPDY)--> + <!-- It configures the non wire protocol aspects of the HTTP --> + <!-- semantic. --> + <!-- --> + <!-- Consult the javadoc of o.e.j.server.HttpConfiguration --> + <!-- for all configuration that may be set here. 
--> + <!-- =========================================================== --> + <New id="httpConfig" class="org.eclipse.jetty.server.HttpConfiguration"> + <Set name="secureScheme">https</Set> + <Set name="securePort"><Property name="jetty.secure.port" default="8443" /></Set> + <Set name="outputBufferSize"><Property name="jetty.output.buffer.size" default="32768" /></Set> + <Set name="requestHeaderSize"><Property name="jetty.request.header.size" default="8192" /></Set> + <Set name="responseHeaderSize"><Property name="jetty.response.header.size" default="8192" /></Set> + <Set name="sendServerVersion"><Property name="jetty.send.server.version" default="true" /></Set> + <Set name="sendDateHeader"><Property name="jetty.send.date.header" default="false" /></Set> + <Set name="headerCacheSize">512</Set> + <!-- Uncomment to enable handling of X-Forwarded- style headers + <Call name="addCustomizer"> + <Arg><New class="org.eclipse.jetty.server.ForwardedRequestCustomizer"/></Arg> + </Call> + --> + </New> + + <!-- Configure the HTTP endpoint. --> + <Call name="addConnector"> + <Arg> + <New class="org.eclipse.jetty.server.ServerConnector"> + <Arg name="server"><Ref refid="Server" /></Arg> + <Arg name="factories"> + <Array type="org.eclipse.jetty.server.ConnectionFactory"> + <Item> + <New class="org.eclipse.jetty.server.HttpConnectionFactory"> + <Arg name="config"><Ref refid="httpConfig" /></Arg> + </New> + </Item> + </Array> + </Arg> + <Set name="host"><Property name="jetty.host" /></Set> + <Set name="port"><Property name="jetty.port" default="8080" /></Set> + <Set name="idleTimeout"><Property name="http.timeout" default="30000"/></Set> + </New> + </Arg> + </Call> + + <!-- =========================================================== --> + <!-- Set handler Collection Structure --> + <!-- =========================================================== --> + <Set name="handler"> + <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection"> + <Set name="handlers"> + <Array type="org.eclipse.jetty.server.Handler"> + <Item> + <!-- This is the bigdata web application. --> + <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> + <Set name="resourceBase"> + <!-- The location of the top-level of the bigdata webapp. --> + <Property name="jetty.resourceBase" default="<%= JETTY_DIR %>" /> + </Set> + <Set name="contextPath">/bigdata</Set> + <Set name="descriptor"><%= JETTY_DIR %>/WEB-INF/web.xml</Set> + <Set name="parentLoaderPriority">true</Set> + <Set name="extractWAR">false</Set> + </New> + </Item> + <Item> + <!-- This appears to be necessary in addition to the above. --> + <!-- Without this, it will not resolve http://localhost:8080/ --> + <!-- and can fail to deliver some of the static content. --> + <New id="ResourceHandler" class="org.eclipse.jetty.server.handler.ResourceHandler"> + <Set name="resourceBase"> + <!-- The location of the top-level of the bigdata webapp. 
--> + <Property name="jetty.resourceBase" default="<%= JETTY_DIR %>" /> + </Set> + <Set name="welcomeFiles"> + <Array type="java.lang.String"> + <Item>html/index.html</Item> + </Array> + </Set> + </New> + </Item> + <!-- <Item> + <New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler"></New> + </Item> --> + </Array> + </Set> + </New> + </Set> + + <!-- =========================================================== --> + <!-- extra server options --> + <!-- =========================================================== --> + <Set name="stopAtShutdown">true</Set> + <Set name="stopTimeout">5000</Set> + <Set name="dumpAfterStart"><Property name="jetty.dump.start" default="false"/></Set> + <Set name="dumpBeforeStop"><Property name="jetty.dump.stop" default="false"/></Set> + +</Configure> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
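The RWStore.properties staged by r8146 is an ordinary Java properties file whose <%= BD_HOME %> placeholders are expanded by the installer (Brew or Chef) before the NanoSparqlServer reads it. A minimal standard-library sketch of reading the expanded file follows; the relative path used below is an assumption for illustration, while the property key is the one shown in the added file.
{{{
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

// Standard-library sketch: after the installer substitutes <%= BD_HOME %>,
// the staged RWStore.properties can be loaded like any Java properties file.
// The file path below is illustrative; the key is taken from the added file.
public class ReadRWStoreProperties {

    public static void main(final String[] args) throws IOException {

        final Properties p = new Properties();

        try (FileInputStream in = new FileInputStream(
                "var/jetty/WEB-INF/RWStore.properties")) {
            p.load(in);
        }

        // Expected to resolve to <BD_HOME>/var/data/bigdata.jnl once expanded.
        System.out.println(p.getProperty(
                "com.bigdata.journal.AbstractJournal.file"));
    }
}
}}}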
From: <tob...@us...> - 2014-04-29 23:37:04
Revision: 8145 http://sourceforge.net/p/bigdata/code/8145 Author: tobycraig Date: 2014-04-29 23:37:02 +0000 (Tue, 29 Apr 2014) Log Message: ----------- Improved pagination Modified Paths: -------------- branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-29 23:20:01 UTC (rev 8144) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-29 23:37:02 UTC (rev 8145) @@ -109,12 +109,12 @@ </div> <div id="query-pagination" class="box"> - <span id="current-results"></span> + Total results: <span id="total-results"></span>, displaying <span id="current-results"></span> <select id="results-per-page"> - <option>10</option> <option>25</option> - <option>50</option> + <option selected>50</option> <option>100</option> + <option>all</option> </select> per page <div id="page-selector"> <button id="previous-page">◀</button> Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-29 23:20:01 UTC (rev 8144) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-29 23:37:02 UTC (rev 8145) @@ -2,7 +2,7 @@ // global variables var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, NAMESPACES_READY, NAMESPACE_SHORTCUTS, FILE_CONTENTS, QUERY_RESULTS; -var PAGE_SIZE=10, TOTAL_PAGES, CURRENT_PAGE; +var PAGE_SIZE = 50, TOTAL_PAGES, CURRENT_PAGE; /* Modal functions */ @@ -686,6 +686,7 @@ thead.append(tr); table.append(thead); + $('#total-results').html(data.results.bindings.length); setNumberOfPages(); showPage(1); @@ -737,9 +738,13 @@ } function setPageSize(n) { - n = parseInt(n, 10); - if(typeof n != 'number' || n % 1 != 0 || n < 1 || n == PAGE_SIZE) { - return; + if(n == 'all') { + n = QUERY_RESULTS.results.bindings.length; + } else { + n = parseInt(n, 10); + if(typeof n != 'number' || n % 1 != 0 || n < 1 || n == PAGE_SIZE) { + return; + } } PAGE_SIZE = n; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
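The workbench pagination itself is JavaScript, but the arithmetic the r8145 change adjusts (a default of 50 rows per page, an "all" option that collapses to a single page, and ceil(total/pageSize) pages overall) is easy to state outside the browser. The Java fragment below is only an illustration of that arithmetic under those assumptions; it does not reproduce the workbench's own showPage()/setPageSize() code.
{{{
// Illustration only: the page-count and slice arithmetic implied by the
// workbench pagination change (default 50 per page, "all" = one page).
// This is not the workbench code, which lives in js/workbench.js.
public class PaginationMath {

    static int totalPages(final int totalResults, final int pageSize) {
        return (int) Math.ceil((double) totalResults / pageSize);
    }

    /** Returns {startInclusive, endExclusive} for a 1-based page number. */
    static int[] pageBounds(final int page, final int totalResults, final int pageSize) {
        final int start = (page - 1) * pageSize;
        final int end = Math.min(page * pageSize, totalResults);
        return new int[] { start, end };
    }

    public static void main(final String[] args) {
        // e.g. 137 result rows at the new default of 50 per page:
        System.out.println(totalPages(137, 50));        // 3
        final int[] b = pageBounds(3, 137, 50);
        System.out.println(b[0] + ".." + b[1]);         // 100..137
    }
}
}}}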
From: <tob...@us...> - 2014-04-29 23:20:06
Revision: 8144 http://sourceforge.net/p/bigdata/code/8144 Author: tobycraig Date: 2014-04-29 23:20:01 +0000 (Tue, 29 Apr 2014) Log Message: ----------- Changed workbench styling Modified Paths: -------------- branches/RDR/bigdata-war/src/html/css/style.css branches/RDR/bigdata-war/src/html/images/logo.png branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/css/style.css =================================================================== --- branches/RDR/bigdata-war/src/html/css/style.css 2014-04-29 13:04:23 UTC (rev 8143) +++ branches/RDR/bigdata-war/src/html/css/style.css 2014-04-29 23:20:01 UTC (rev 8144) @@ -51,7 +51,7 @@ /* Workbench */ body { - margin: 50px 10px; + margin: 10px; background-color: #f1f1f1; font-family: sans-serif; font-size: 80%; @@ -69,7 +69,6 @@ } #container { - max-width: 1000px; margin: 0 auto; } @@ -86,10 +85,6 @@ padding-top: 80px; } -#search-form label { - font-size: 50%; -} - #search-form input { border: 1px solid #e3e3e3; margin: 0; @@ -150,11 +145,11 @@ display: none; clear: both; background: white; + padding: 20px 0; } .box { - padding: 20px 55px; - border-bottom: none; + margin: 20px; overflow-x: scroll; } @@ -226,21 +221,21 @@ box-sizing: border-box; } -label[for=load-type], label[for=rdf-type] { - margin: 0 10px 0 30px; +hr { + background: #929292; + border: none; + height: 5px; + width: 50%; + margin: 20px auto; } -.has-vertical-divider { - line-height: 55px; +#load-load { + margin: 0 auto; + display: block; } -.vertical-divider { - height: 55px; - display: inline-block; - width: 0; - border-left: 1px solid #d7d7d7; - padding-left: 30px; - margin-left: 30px; +#load-buttons { + text-align: center; } .bottom { @@ -255,7 +250,7 @@ border: none; } -#advanced-features, #query-response, #query-pagination, #query-explanation, #query-export-container, #load-response, #load-clear, #explore-results, #namespace-properties { +#advanced-features, #query-response, #query-pagination, #query-explanation, #query-export-container, #load-response, #load-clear-container, #explore-results, #namespace-properties { display: none; } @@ -306,14 +301,6 @@ background-color: red; } -#advanced-features { - margin-right: 50px; -} - -#advanced-features label { - margin-left: 20px; -} - #running-queries li { margin: 10px 0; } @@ -334,103 +321,17 @@ overflow-x: scroll; } -.controls-container { - padding: 25px; -} - -button, input[type=reset], input[type=submit], input[type=file], #load-file-container, select { - width: 110px; - height: 25px; - border: 1px solid #e4e4e4; - color: #616161; -} - -#load-file-container { - display: inline-block; - position: relative; - text-align: center; - line-height: 25px; -} - -#load-file { - position: absolute; - top: 0; - left: 0; - opacity: 0; -} - -input[type=checkbox] { - display: none; -} - -input[type="checkbox"] + label span { - display: inline-block; - width: 13px; - height: 13px; - margin: 0; - vertical-align: middle; - background-image: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA0AAAANCAIAAAD9iXMrAAAA4UlEQVQoU11RARLCIAzr/1+hMHF7iN6x0rLph0wp9Tx3jLVrSNNALE24YR/PzlwRV9nftTPz42gnt2dj0iaXg5LSTagobWpxOqk02vr8CSiBANGqljsaaznIf95HClZCszLy3GnpVi7B7VCwQgBBShqg62uWs07QXI2g1bB+LgdZjtg0dNOKnvbm0beEmgkNARANB4wv95n/ifMYDmBWgnlewL4Gq6cYyw/DVxIRT5YfVh/rq9L0fflKWAg+H8sDw6kYzu/DTUbZJ0hxGFVgDIuPLeHOirHQBTt8HUHFxWLeD+xTIgvbAhziAAAAAElFTkSuQmCC"); -} - -input[type="checkbox"]:checked + label span { - background-image: 
url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA0AAAANCAIAAAD9iXMrAAABb0lEQVQoFQXBTYuNYRjA8f913S/neRyUD2AzRVgoNTsZzUodG76Jz2QpZWEhNmNBaM682KCMEsnCHKYx5nm77+vy+8mbnf1spaZK0ShopQY1XI0qroTkdqoas5VnB+tNwyxikQgFJKAFM8rEauTe2nYE5g1NAy0xIpAdQMCNbuCCEUTVBWugJTf8+ND+/UWc0R/y7VOriWZGO8PdFTQFzgSSIKHbe3S/H3j9ZCGx04gqrrig4qYFALh0Az1/uv3wTpqXy9cJDo5Fqpi6MCZGqII7a5vP//yeX7n1ApgCABBcVZw8IU4A4Oe72+L6dWczCGogaCG4KioiuFDh83tWh+cWDx4frc5+2Q0i4AyRGlzNrFSqcXLMx6271za2YsvVm0/3Xy2Oj6hOHMCqLJfLl9/X85w8oxGqUB0VELynM+oJGxeXMTljhxrSUxM1oAURCmBMI/8GFJW3u3vRJhcVN4jJy6iqSKHmkk1LttDH+h9YiLIwVyNFQAAAAABJRU5ErkJggg=="); -} - -#load-response > pre { - overflow-x: scroll; -} - .clear { clear: both; } -.right { - float: right; -} - -.orange { - background-color: #ffd8b3; - background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#ffd8b3)); - background-image: -webkit-linear-gradient(top, #ffffff, #ffd8b3); - background-image: -moz-linear-gradient(top, #ffffff, #ffd8b3); - background-image: -ms-linear-gradient(top, #ffffff, #ffd8b3); - background-image: -o-linear-gradient(top, #ffffff, #ffd8b3); - background-image: linear-gradient(to bottom, #ffffff, #ffd8b3); - filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,startColorstr=#ffffff, endColorstr=#ffd8b3); -} - -.blue { - background-color: #d3e4f6; - background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#d3e4f6)); - background-image: -webkit-linear-gradient(top, #ffffff, #d3e4f6); - background-image: -moz-linear-gradient(top, #ffffff, #d3e4f6); - background-image: -ms-linear-gradient(top, #ffffff, #d3e4f6); - background-image: -o-linear-gradient(top, #ffffff, #d3e4f6); - background-image: linear-gradient(to bottom, #ffffff, #d3e4f6); - filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,startColorstr=#ffffff, endColorstr=#d3e4f6); -} - -.green { - background-color: #d9e689; - background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#d9e689)); - background-image: -webkit-linear-gradient(top, #ffffff, #d9e689); - background-image: -moz-linear-gradient(top, #ffffff, #d9e689); - background-image: -ms-linear-gradient(top, #ffffff, #d9e689); - background-image: -o-linear-gradient(top, #ffffff, #d9e689); - background-image: linear-gradient(to bottom, #ffffff, #d9e689); - filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,startColorstr=#ffffff, endColorstr=#d9e689); -} - #query-pagination { border: 1px solid #e1e1e1; padding: 10px 25px; - margin: 0 55px; - color: #e1e1e1; } #results-per-page { width: 60px; - color: #e1e1e1; } #query-pagination button { @@ -438,10 +339,17 @@ height: 20px; border: 1px solid #e1e1e1; background: transparent; - color: #e1e1e1; } #current-page { border: 1px solid #e1e1e1; - color: #e1e1e1; } + +#query-export-container { + text-align: right; +} + +#load-clear-container { + text-align: right; +} + Modified: branches/RDR/bigdata-war/src/html/images/logo.png =================================================================== (Binary files differ) Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-29 13:04:23 UTC (rev 8143) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-29 23:20:01 UTC (rev 8144) @@ -40,22 +40,17 @@ <div id="load-errors"></div> <textarea id="load-box" placeholder="(Type in or drag a file containing RDF data, a SPARQL update or a file path or URL)"></textarea> </div> - - <div 
class="controls-container"> - <span id="load-file-container" class="orange">Upload a local file<input type="file" id="load-file"></span> - <button class="orange right">Clear</button> - <p id="large-file-message">Your file <span id="filename"></span> is too large to display here, but will be uploaded as normal. <a href="#" id="clear-file">Remove file</a></p> - </div> - - <div class="controls-container right has-vertical-divider"> - <label for="load-type">TYPE:</label> - <select id="load-type" class="blue"> + <p id="large-file-message">Your file <span id="filename"></span> is too large to display here, but will be uploaded as normal. <a href="#" id="clear-file">Remove file</a></p> + <p> + <input type="file" id="load-file"><br> + <label for="load-type">Type:</label> + <select id="load-type"> <option value="sparql" selected="selected">SPARQL Update</option> <option value="rdf">RDF Data</option> <option value="path">File Path or URL</option> </select> - <label for="rdf-type">FORMAT:</label> - <select id="rdf-type" class="blue"> + <label for="rdf-type">Format:</label> + <select id="rdf-type"> <option value="n-quads">N-Quads</option> <option value="n-triples">N-Triples</option> <option value="n3">Notation3</option> @@ -65,19 +60,20 @@ <option value="trix">TriX</option> <option value="turtle">Turtle</option> </select> - <span class="vertical-divider"> </span> - <button id="load-load" class="green">Load</button> - </div> + </p> + <hr class="shadow"> + <button id="load-load">Load</button> </div> <div class="box" id="load-response"> <pre></pre> - <div class="controls-container right"> - <button id="load-clear" class="orange">Clear output</button> - </div> </div> + <div class="box" id="load-clear-container"> + <button id="load-clear">Clear output</button> + </div> + </div> <div class="tab" id="query-tab"> @@ -90,19 +86,19 @@ <div id="query-errors"></div> <textarea id="query-box" name="query" placeholder="(Input a SPARQL query)"></textarea> - <div class="controls-container"> - <input type="reset" value="Clear" class="orange right"> + <a href="#" id="advanced-features-toggle">Advanced features</a> + + <div id="advanced-features"> + <input type="checkbox" id="query-explain"> <label for="query-explain">Explain</label> + <input type="checkbox" name="analytic" value="true" id="query-analytic"> <label for="query-analytic">Analytic</label> + <input type="checkbox" name="RTO" value="true" id="query-rto"> <label for="query-rto">Runtime Query Optimizer</label> </div> - <div class="controls-container"> - <div class="clear right"> - <a href="#" id="advanced-features-toggle">ADVANCED FEATURES</a>: - <span id="advanced-features"> - <input type="checkbox" id="query-explain"><label for="query-explain"><span></span> EXPLAIN</label> - <input type="checkbox" name="analytic" value="true" id="query-analytic"><label for="query-analytic"><span></span> ANALYTIC</label> - <input type="checkbox" name="RTO" value="true" id="query-rto"><label for="query-rto"><span></span> RUNTIME QUERY OPTIMIZER</label> - </span> - <input type="submit" value="Execute" class="green"> - </div> + + <hr class="shadow"> + + <div id="load-buttons"> + <input type="submit" value="Execute"> + <input type="reset" value="Clear"> </div> </form> @@ -131,10 +127,8 @@ </div> <div id="query-export-container" class="box"> - <div class="controls-container"> - <button id="query-export" class="right orange">Export</button> - <button id="query-response-clear" class="right clear orange">Clear</button> - </div> + <button id="query-export">Export</button> + <button 
id="query-response-clear">Clear</button> </div> </div> @@ -172,8 +166,6 @@ <div class="box" id="explore-attributes"></div> </div> - <div class="bottom"></div> - </div> <div class="tab" id="status-tab"> Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-29 13:04:23 UTC (rev 8143) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-29 23:20:01 UTC (rev 8144) @@ -919,7 +919,6 @@ // clear tables $('#explore-incoming, #explore-outgoing, #explore-attributes').html('<table>'); - $('#explore-tab .bottom').hide(); $('#explore-results, #explore-results .box').show(); // go through each binding, adding it to the appropriate table @@ -1026,7 +1025,6 @@ } function updateExploreError(jqXHR, textStatus, errorThrown) { - $('#explore-tab .bottom').show(); $('#explore-results .box').html('').hide(); $('#explore-header').text('Error! ' + textStatus + ' ' + jqXHR.statusText); $('#explore-results, #explore-header').show(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-04-29 13:04:26
Revision: 8143 http://sourceforge.net/p/bigdata/code/8143 Author: thompsonbry Date: 2014-04-29 13:04:23 +0000 (Tue, 29 Apr 2014) Log Message: ----------- Rolling back edit that broke the RDR build. Modified Paths: -------------- branches/RDR/build.xml Modified: branches/RDR/build.xml =================================================================== --- branches/RDR/build.xml 2014-04-29 03:37:31 UTC (rev 8142) +++ branches/RDR/build.xml 2014-04-29 13:04:23 UTC (rev 8143) @@ -1209,16 +1209,18 @@ <!-- Stage documentation from the wiki. --> <get dest="${dist.doc}/HAJournalServer.html" - src="http://wiki.bigdata.com/wiki/index.php/HAJournalServer" + src="http://wiki.bigdata.com/wiki/index.php/HAJournalServer?printable=yes" /> <get dest="${dist.doc}/NanoSparqlServer.html" - src="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer" + src="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer?printable=yes" /> - <!-- Stage files specific to NSS deployments provided by Brew and Chef. --> + <!-- Note: Commented out. This is breaking the RDR build. --> + <!-- Stage files specific to NSS deployments provided by Brew and Chef. <copy file="${src.resources}/bin/bigdata" todir="${dist.bin}" /> <chmod file="${dist.bin}/bigdata" perm="755" /> +--> </target> <!-- --> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-04-29 03:37:37
Revision: 8142 http://sourceforge.net/p/bigdata/code/8142 Author: dmekonnen Date: 2014-04-29 03:37:31 +0000 (Tue, 29 Apr 2014) Log Message: ----------- synching bigdata.rb updates to 1.3.0 branch. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/brew/bigdata.rb Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/brew/bigdata.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/brew/bigdata.rb 2014-04-29 03:33:29 UTC (rev 8141) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/brew/bigdata.rb 2014-04-29 03:37:31 UTC (rev 8142) @@ -4,36 +4,45 @@ # /usr/local/Library/Contributions/example-formula.rb # PLEASE REMOVE ALL GENERATED COMMENTS BEFORE SUBMITTING YOUR PULL REQUEST! -class SystapBigdata < Formula - homepage 'http://bigdata.com/bigdata/blog/' - url 'http://iweb.dl.sourceforge.net/project/bigdata/bigdata/1.3.0/REL.bigdata-1.3.0.tgz' - sha1 '605e800386300a6965125e0e9bfc06a268df0f08' +class Bigdata < Formula + homepage 'http://bigdata.com/blog/' + url 'http://bigdata.com/deploy/bigdata-1.3.0.tgz' + sha1 'a395a243a2746ce47cf8893f2207fd2e0de4a9c1' - # depends_on 'cmake' => :build - # depends_on :java7 # if your formula requires any X11/XQuartz components - def install - # Install the base files prefix.install Dir['*'] + end - # Setup the lib files - # (var+'lib/bigdata').mkpath + def caveats; <<-EOS.undent + After launching, visit the Bigdata Workbench at: + http://localhost:8080/bigdata - # Extract bigdata and install to sbin + "bigdata" command synopis: + ------------------------- - # sbin.install 'bigdata' - # (sbin/'bigdata').chmod 0755 - end + Start the server: - def caveats; <<-EOS.undent - After launching, visit the Bigdata Workbench at: http://localhost:8080/bigdata + % bigdata start + + Stop the server: + + % bigdata stop + + Restart the server: + + % bigdata restart + + To tune the server configuration, edit the "#{var}/jetty/WEB-INF/RWStore.properties" file. + + Further documentation: + + #{doc} EOS end + plist_options :startup => 'true', :manual => 'bigdata start' - plist_options :manual => 'bigdata' - def plist; <<-EOS.undent <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" @@ -43,17 +52,14 @@ <key>Label</key> <string>#{plist_name}</string> <key>Program</key> - <string>#{opt_sbin}/bigdata</string> + <string>#{bin}/bigdata</string> <key>RunAtLoad</key> <true/> - <key>EnvironmentVariables</key> - <dict> - <!-- need erl in the path --> - <key>PATH</key> - <string>/usr/local/sbin:/usr/bin:/bin:/usr/local/bin</string> - </dict> + <key>WorkingDirectory</key> + <string>#{prefix}</string> </dict> </plist> EOS end + end This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-04-29 03:33:33
Revision: 8141 http://sourceforge.net/p/bigdata/code/8141 Author: dmekonnen Date: 2014-04-29 03:33:29 +0000 (Tue, 29 Apr 2014) Log Message: ----------- Adding brew formula. deploy-artifact-nss target added to build.xml to produce a minimal archive for running NSS. Modified Paths: -------------- branches/RDR/build.xml Added Paths: ----------- branches/RDR/bigdata/src/resources/deployment/brew/ branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb Added: branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb =================================================================== --- branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb (rev 0) +++ branches/RDR/bigdata/src/resources/deployment/brew/bigdata.rb 2014-04-29 03:33:29 UTC (rev 8141) @@ -0,0 +1,65 @@ +require 'formula' + +# Documentation: https://github.com/mxcl/homebrew/wiki/Formula-Cookbook +# /usr/local/Library/Contributions/example-formula.rb +# PLEASE REMOVE ALL GENERATED COMMENTS BEFORE SUBMITTING YOUR PULL REQUEST! + +class Bigdata < Formula + homepage 'http://bigdata.com/blog/' + url 'http://bigdata.com/deploy/bigdata-1.3.0.tgz' + sha1 'a395a243a2746ce47cf8893f2207fd2e0de4a9c1' + + def install + prefix.install Dir['*'] + end + + def caveats; <<-EOS.undent + After launching, visit the Bigdata Workbench at: + + http://localhost:8080/bigdata + + "bigdata" command synopis: + ------------------------- + + Start the server: + + % bigdata start + + Stop the server: + + % bigdata stop + + Restart the server: + + % bigdata restart + + To tune the server configuration, edit the "#{var}/jetty/WEB-INF/RWStore.properties" file. + + Further documentation: + + #{doc} + EOS + end + + plist_options :startup => 'true', :manual => 'bigdata start' + + def plist; <<-EOS.undent + <?xml version="1.0" encoding="UTF-8"?> + <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" + "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> + <plist version="1.0"> + <dict> + <key>Label</key> + <string>#{plist_name}</string> + <key>Program</key> + <string>#{bin}/bigdata</string> + <key>RunAtLoad</key> + <true/> + <key>WorkingDirectory</key> + <string>#{prefix}</string> + </dict> + </plist> + EOS + end + +end Modified: branches/RDR/build.xml =================================================================== --- branches/RDR/build.xml 2014-04-29 02:10:07 UTC (rev 8140) +++ branches/RDR/build.xml 2014-04-29 03:33:29 UTC (rev 8141) @@ -1209,12 +1209,16 @@ <!-- Stage documentation from the wiki. --> <get dest="${dist.doc}/HAJournalServer.html" - src="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=HAJournalServer&printable=yes" + src="http://wiki.bigdata.com/wiki/index.php/HAJournalServer" /> <get dest="${dist.doc}/NanoSparqlServer.html" - src="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=NanoSparqlServer&printable=yes" + src="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer" /> - + + <!-- Stage files specific to NSS deployments provided by Brew and Chef. 
--> + <copy file="${src.resources}/bin/bigdata" + todir="${dist.bin}" /> + <chmod file="${dist.bin}/bigdata" perm="755" /> </target> <!-- --> @@ -1280,6 +1284,7 @@ <exclude name="bigdata/bin/disco-tool" /> <exclude name="bigdata/bin/pstart" /> <exclude name="bigdata/bin/startHAServices" /> + <exclude name="bigdata/bin/bigdata" /> </tarfileset> <!-- Add scripts separately, making them executable --> @@ -1293,6 +1298,35 @@ </target> + <target name="deploy-artifact-nss" depends="clean, stage" + description="Create compressed tar file for Jetty based deployment via Brew and Chef installers."> + + <tar destfile="${bigdata.dir}/REL-NSS.${version}.tgz" + compression="gzip"> + + <tarfileset dir="${bigdata.dir}/dist"> + <include name="bigdata/doc/**" /> + <exclude name="bigdata/doc/HAJournalServer.html" /> + <include name="bigdata/lib/**" /> + <exclude name="bigdata/lib/bigdata-ganglia.jar" /> + <exclude name="bigdata/lib/browser.jar" /> + <exclude name="bigdata/lib/reggie.jar" /> + <exclude name="bigdata/lib/zookeeper.jar" /> + <exclude name="bigdata/lib/jsk-*.jar" /> + <exclude name="bigdata/lib-dl" /> + <exclude name="bigdata/lib-ext" /> + <include name="bigdata/var/jetty/**" /> + </tarfileset> + + <!-- Add scripts separately, making them executable --> + + <tarfileset dir="${bigdata.dir}/dist" filemode="755"> + <include name="bigdata/bin/bigdata" /> + </tarfileset> + </tar> + + </target> + <!-- FIXME DEBUG and add 'depends="javadoc, stage" (should stage stage javadoc?)' --> <!-- Note: can require 'rpm' and 'rpm-build. --> <!-- TODO: We do not need both this and "deploy-artifact". --> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-04-29 02:10:11
Revision: 8140 http://sourceforge.net/p/bigdata/code/8140 Author: tobycraig Date: 2014-04-29 02:10:07 +0000 (Tue, 29 Apr 2014) Log Message: ----------- Further workbench styling Modified Paths: -------------- branches/RDR/bigdata-war/src/html/css/style.css branches/RDR/bigdata-war/src/html/index.html Modified: branches/RDR/bigdata-war/src/html/css/style.css =================================================================== --- branches/RDR/bigdata-war/src/html/css/style.css 2014-04-26 00:55:27 UTC (rev 8139) +++ branches/RDR/bigdata-war/src/html/css/style.css 2014-04-29 02:10:07 UTC (rev 8140) @@ -154,13 +154,11 @@ .box { padding: 20px 55px; - /*border: 1px solid;*/ border-bottom: none; overflow-x: scroll; } .box:last-of-type { - /*border-bottom: 1px solid;*/ } .modal { @@ -228,21 +226,24 @@ box-sizing: border-box; } -hr { - background: #929292; - border: none; - height: 5px; - width: 50%; - margin: 20px auto; +label[for=load-type], label[for=rdf-type] { + margin: 0 10px 0 30px; } -#load-load { - margin: 0 auto; - display: block; +.has-vertical-divider { + line-height: 55px; } +.vertical-divider { + height: 55px; + display: inline-block; + width: 0; + border-left: 1px solid #d7d7d7; + padding-left: 30px; + margin-left: 30px; +} + .bottom { - /*border-top: 1px solid;*/ text-align: right; } @@ -254,7 +255,7 @@ border: none; } -#advanced-features, #query-response, #query-pagination, #query-explanation, #query-tab .bottom *, #load-response, #load-clear, #explore-results, #namespace-properties { +#advanced-features, #query-response, #query-pagination, #query-explanation, #query-export-container, #load-response, #load-clear, #explore-results, #namespace-properties { display: none; } @@ -337,13 +338,27 @@ padding: 25px; } -button, input[type=reset], input[type=submit], input[type=file], select { +button, input[type=reset], input[type=submit], input[type=file], #load-file-container, select { width: 110px; height: 25px; border: 1px solid #e4e4e4; color: #616161; } +#load-file-container { + display: inline-block; + position: relative; + text-align: center; + line-height: 25px; +} + +#load-file { + position: absolute; + top: 0; + left: 0; + opacity: 0; +} + input[type=checkbox] { display: none; } @@ -361,6 +376,9 @@ background-image: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA0AAAANCAIAAAD9iXMrAAABb0lEQVQoFQXBTYuNYRjA8f913S/neRyUD2AzRVgoNTsZzUodG76Jz2QpZWEhNmNBaM682KCMEsnCHKYx5nm77+vy+8mbnf1spaZK0ShopQY1XI0qroTkdqoas5VnB+tNwyxikQgFJKAFM8rEauTe2nYE5g1NAy0xIpAdQMCNbuCCEUTVBWugJTf8+ND+/UWc0R/y7VOriWZGO8PdFTQFzgSSIKHbe3S/H3j9ZCGx04gqrrig4qYFALh0Az1/uv3wTpqXy9cJDo5Fqpi6MCZGqII7a5vP//yeX7n1ApgCABBcVZw8IU4A4Oe72+L6dWczCGogaCG4KioiuFDh83tWh+cWDx4frc5+2Q0i4AyRGlzNrFSqcXLMx6271za2YsvVm0/3Xy2Oj6hOHMCqLJfLl9/X85w8oxGqUB0VELynM+oJGxeXMTljhxrSUxM1oAURCmBMI/8GFJW3u3vRJhcVN4jJy6iqSKHmkk1LttDH+h9YiLIwVyNFQAAAAABJRU5ErkJggg=="); } +#load-response > pre { + overflow-x: scroll; +} .clear { clear: both; @@ -407,10 +425,12 @@ border: 1px solid #e1e1e1; padding: 10px 25px; margin: 0 55px; + color: #e1e1e1; } #results-per-page { width: 60px; + color: #e1e1e1; } #query-pagination button { @@ -418,7 +438,10 @@ height: 20px; border: 1px solid #e1e1e1; background: transparent; + color: #e1e1e1; } #current-page { - border: 1px solid #e1e1e1; \ No newline at end of file + border: 1px solid #e1e1e1; + color: #e1e1e1; +} Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-26 00:55:27 UTC (rev 
8139) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-29 02:10:07 UTC (rev 8140) @@ -40,17 +40,22 @@ <div id="load-errors"></div> <textarea id="load-box" placeholder="(Type in or drag a file containing RDF data, a SPARQL update or a file path or URL)"></textarea> </div> - <p id="large-file-message">Your file <span id="filename"></span> is too large to display here, but will be uploaded as normal. <a href="#" id="clear-file">Remove file</a></p> - <p> - <input type="file" id="load-file"><br> - <label for="load-type">Type:</label> - <select id="load-type"> + + <div class="controls-container"> + <span id="load-file-container" class="orange">Upload a local file<input type="file" id="load-file"></span> + <button class="orange right">Clear</button> + <p id="large-file-message">Your file <span id="filename"></span> is too large to display here, but will be uploaded as normal. <a href="#" id="clear-file">Remove file</a></p> + </div> + + <div class="controls-container right has-vertical-divider"> + <label for="load-type">TYPE:</label> + <select id="load-type" class="blue"> <option value="sparql" selected="selected">SPARQL Update</option> <option value="rdf">RDF Data</option> <option value="path">File Path or URL</option> </select> - <label for="rdf-type">Format:</label> - <select id="rdf-type"> + <label for="rdf-type">FORMAT:</label> + <select id="rdf-type" class="blue"> <option value="n-quads">N-Quads</option> <option value="n-triples">N-Triples</option> <option value="n3">Notation3</option> @@ -60,19 +65,19 @@ <option value="trix">TriX</option> <option value="turtle">Turtle</option> </select> - </p> - <hr class="shadow"> - <button id="load-load">Load</button> + <span class="vertical-divider"> </span> + <button id="load-load" class="green">Load</button> + </div> + </div> <div class="box" id="load-response"> <pre></pre> + <div class="controls-container right"> + <button id="load-clear" class="orange">Clear output</button> + </div> </div> - <div class="bottom"> - <button id="load-clear">Clear output</button> - </div> - </div> <div class="tab" id="query-tab"> @@ -125,9 +130,11 @@ <div id="query-explanation" class="box"> </div> - <div id="query-export-container" class="controls-container"> - <button id="query-export" class="right orange">Export</button> - <button id="query-response-clear" class="right clear orange">Clear</button> + <div id="query-export-container" class="box"> + <div class="controls-container"> + <button id="query-export" class="right orange">Export</button> + <button id="query-response-clear" class="right clear orange">Clear</button> + </div> </div> </div> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-04-26 00:55:30
Revision: 8139 http://sourceforge.net/p/bigdata/code/8139 Author: tobycraig Date: 2014-04-26 00:55:27 +0000 (Sat, 26 Apr 2014) Log Message: ----------- New workbench styling (still in progress) Modified Paths: -------------- branches/RDR/bigdata-war/src/html/css/style.css branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/css/style.css =================================================================== --- branches/RDR/bigdata-war/src/html/css/style.css 2014-04-24 10:02:02 UTC (rev 8138) +++ branches/RDR/bigdata-war/src/html/css/style.css 2014-04-26 00:55:27 UTC (rev 8139) @@ -51,7 +51,11 @@ /* Workbench */ body { - margin: 10px; + margin: 50px 10px; + background-color: #f1f1f1; + font-family: sans-serif; + font-size: 80%; + color: #545454; } h1 { @@ -65,7 +69,8 @@ } #container { - /*max-width: 600px;*/ + max-width: 1000px; + margin: 0 auto; } #top { @@ -77,6 +82,33 @@ float: left; } +#search-form { + padding-top: 80px; +} + +#search-form label { + font-size: 50%; +} + +#search-form input { + border: 1px solid #e3e3e3; + margin: 0; + height: 21px; +} + +#search-form button { + background-image: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAsAAAAMCAMAAACDd7esAAAArlBMVEX////7+/vQ0NDExMT09PSzs7MZGRkAAAAHBweHh4f9/f3JyckDAwNXV1fPz8/b29uCgoIEBASNjY1GRkY8PDz+/v57e3sLCwsRERGOjo7Nzc3R0dEjIyO7u7vf399ra2sVFRXk5OT4+PhAQEAqKirw8PAkJCQSEhJvb28pKSlpaWn5+fny8vJ0dHQwMDAhISFYWFgYGBg+Pj7t7e0lJSXZ2dmlpaUmJiahoaGMjIzmLhVjAAAACXZwQWcAAAAVAAAAFABoZ/l0AAAAXklEQVQIHQXBAwLDQAAAsHS6sXNn29b/P7YEUSqdAWRzIeQLRSiVK3G1Vm9As9VG0onQ7UE/DDBMYBTGmExnzBdLWK0322QX9gc4ns6Xa3y7PwCk888XgHf4APD9/QGT1gbfV95+QQAAAABJRU5ErkJggg=='); + background-repeat: no-repeat; + background-color: white; + background-position: 6px 6px; + border: 1px solid #e3e3e3; + border-left: none; + padding: 0; + margin: 0; + width: 25px; + height: 25px; +} + .shadow { -webkit-box-shadow: 0px 3px 5px 0px rgba(50, 50, 50, 0.75); -moz-box-shadow: 0px 3px 5px 0px rgba(50, 50, 50, 0.75); @@ -85,40 +117,50 @@ #tab-selector { clear: both; + padding-top: 20px; } #tab-selector a { padding: 10px; - border: 1px solid; - border-right: none; + border: 1px solid #dadada; border-bottom: none; display: inline-block; float: left; cursor: pointer; + width: 108px; + margin-right: 9px; + text-transform: uppercase; + text-align: center; + font-weight: bold; + background-color: #ebebeb; } -#tab-selector a:last-of-type { - border-right: 1px solid; +#tab-selector p { + float: right; + font-weight: bold; + padding-top: 11px; } -.active { - background: lightgrey; +#tab-selector .active { + background: white; + border-color: white; } .tab { display: none; clear: both; + background: white; } .box { - padding: 10px; - border: 1px solid; + padding: 20px 55px; + /*border: 1px solid;*/ border-bottom: none; overflow-x: scroll; } .box:last-of-type { - border-bottom: 1px solid; + /*border-bottom: 1px solid;*/ } .modal { @@ -154,17 +196,26 @@ } .namespace-shortcuts { - text-align: right; + float: right; + margin-bottom: 20px; } .namespace-shortcuts li { display: inline-block; - border: 1px solid; + border: 1px solid #e4e4e4; padding: 5px; margin-left: 5px; cursor: pointer; + width: 40px; + text-align: center; } +.namespace-shortcuts li:hover { + border-color: #b7b7b7; + background-color: #b7b7b7; + color: #ededed; +} + #large-file-message { display: none; margin: 5px 0; @@ -190,12 +241,8 @@ display: block; } -#load-buttons { - text-align: center; -} - .bottom { - border-top: 1px solid; + /*border-top: 
1px solid;*/ text-align: right; } @@ -232,6 +279,7 @@ background-color: transparent; padding: 2px; border-width: 1px; + border-color: #e1e1e1; } /* these should have the same typography so the error highlighting matches up with the query text */ @@ -257,6 +305,14 @@ background-color: red; } +#advanced-features { + margin-right: 50px; +} + +#advanced-features label { + margin-left: 20px; +} + #running-queries li { margin: 10px 0; } @@ -277,3 +333,92 @@ overflow-x: scroll; } +.controls-container { + padding: 25px; +} + +button, input[type=reset], input[type=submit], input[type=file], select { + width: 110px; + height: 25px; + border: 1px solid #e4e4e4; + color: #616161; +} + +input[type=checkbox] { + display: none; +} + +input[type="checkbox"] + label span { + display: inline-block; + width: 13px; + height: 13px; + margin: 0; + vertical-align: middle; + background-image: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA0AAAANCAIAAAD9iXMrAAAA4UlEQVQoU11RARLCIAzr/1+hMHF7iN6x0rLph0wp9Tx3jLVrSNNALE24YR/PzlwRV9nftTPz42gnt2dj0iaXg5LSTagobWpxOqk02vr8CSiBANGqljsaaznIf95HClZCszLy3GnpVi7B7VCwQgBBShqg62uWs07QXI2g1bB+LgdZjtg0dNOKnvbm0beEmgkNARANB4wv95n/ifMYDmBWgnlewL4Gq6cYyw/DVxIRT5YfVh/rq9L0fflKWAg+H8sDw6kYzu/DTUbZJ0hxGFVgDIuPLeHOirHQBTt8HUHFxWLeD+xTIgvbAhziAAAAAElFTkSuQmCC"); +} + +input[type="checkbox"]:checked + label span { + background-image: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA0AAAANCAIAAAD9iXMrAAABb0lEQVQoFQXBTYuNYRjA8f913S/neRyUD2AzRVgoNTsZzUodG76Jz2QpZWEhNmNBaM682KCMEsnCHKYx5nm77+vy+8mbnf1spaZK0ShopQY1XI0qroTkdqoas5VnB+tNwyxikQgFJKAFM8rEauTe2nYE5g1NAy0xIpAdQMCNbuCCEUTVBWugJTf8+ND+/UWc0R/y7VOriWZGO8PdFTQFzgSSIKHbe3S/H3j9ZCGx04gqrrig4qYFALh0Az1/uv3wTpqXy9cJDo5Fqpi6MCZGqII7a5vP//yeX7n1ApgCABBcVZw8IU4A4Oe72+L6dWczCGogaCG4KioiuFDh83tWh+cWDx4frc5+2Q0i4AyRGlzNrFSqcXLMx6271za2YsvVm0/3Xy2Oj6hOHMCqLJfLl9/X85w8oxGqUB0VELynM+oJGxeXMTljhxrSUxM1oAURCmBMI/8GFJW3u3vRJhcVN4jJy6iqSKHmkk1LttDH+h9YiLIwVyNFQAAAAABJRU5ErkJggg=="); +} + + +.clear { + clear: both; +} + +.right { + float: right; +} + +.orange { + background-color: #ffd8b3; + background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#ffd8b3)); + background-image: -webkit-linear-gradient(top, #ffffff, #ffd8b3); + background-image: -moz-linear-gradient(top, #ffffff, #ffd8b3); + background-image: -ms-linear-gradient(top, #ffffff, #ffd8b3); + background-image: -o-linear-gradient(top, #ffffff, #ffd8b3); + background-image: linear-gradient(to bottom, #ffffff, #ffd8b3); + filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,startColorstr=#ffffff, endColorstr=#ffd8b3); +} + +.blue { + background-color: #d3e4f6; + background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#d3e4f6)); + background-image: -webkit-linear-gradient(top, #ffffff, #d3e4f6); + background-image: -moz-linear-gradient(top, #ffffff, #d3e4f6); + background-image: -ms-linear-gradient(top, #ffffff, #d3e4f6); + background-image: -o-linear-gradient(top, #ffffff, #d3e4f6); + background-image: linear-gradient(to bottom, #ffffff, #d3e4f6); + filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,startColorstr=#ffffff, endColorstr=#d3e4f6); +} + +.green { + background-color: #d9e689; + background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#d9e689)); + background-image: -webkit-linear-gradient(top, #ffffff, #d9e689); + background-image: -moz-linear-gradient(top, #ffffff, #d9e689); + background-image: -ms-linear-gradient(top, #ffffff, #d9e689); + background-image: 
-o-linear-gradient(top, #ffffff, #d9e689); + background-image: linear-gradient(to bottom, #ffffff, #d9e689); + filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,startColorstr=#ffffff, endColorstr=#d9e689); +} + +#query-pagination { + border: 1px solid #e1e1e1; + padding: 10px 25px; + margin: 0 55px; +} + +#results-per-page { + width: 60px; +} + +#query-pagination button { + width: 20px; + height: 20px; + border: 1px solid #e1e1e1; + background: transparent; +} + +#current-page { + border: 1px solid #e1e1e1; \ No newline at end of file Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-24 10:02:02 UTC (rev 8138) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-26 00:55:27 UTC (rev 8139) @@ -16,8 +16,7 @@ <div id="top"> <img src="/bigdata/html/images/logo.png" id="logo"> - <form id="search-form"><label for="search-text">Search:</label> <input type="text" id="search-text"></form> - <p>Current namespace: <span id="current-namespace"></span></p> + <form id="search-form"><label for="search-text">SEARCH:</label> <input type="text" id="search-text"><button type="submit"><span> </span></button></form> </div> <div id="tab-selector"> @@ -27,6 +26,7 @@ <a data-target="status">Status</a> <a data-target="performance">Performance</a> <a data-target="namespaces">Namespaces</a> + <p>Current namespace: <span id="current-namespace"></span></p> </div> <div class="tab" id="load-tab"> @@ -51,7 +51,6 @@ </select> <label for="rdf-type">Format:</label> <select id="rdf-type"> - <option value="">Select RDF format</option> <option value="n-quads">N-Quads</option> <option value="n-triples">N-Triples</option> <option value="n3">Notation3</option> @@ -86,20 +85,21 @@ <div id="query-errors"></div> <textarea id="query-box" name="query" placeholder="(Input a SPARQL query)"></textarea> - <a href="#" id="advanced-features-toggle">Advanced features</a> - - <div id="advanced-features"> - <input type="checkbox" id="query-explain"> <label for="query-explain">Explain</label> - <input type="checkbox" name="analytic" value="true" id="query-analytic"> <label for="query-analytic">Analytic</label> - <input type="checkbox" name="RTO" value="true" id="query-rto"> <label for="query-rto">Runtime Query Optimizer</label> + <div class="controls-container"> + <input type="reset" value="Clear" class="orange right"> </div> + <div class="controls-container"> + <div class="clear right"> + <a href="#" id="advanced-features-toggle">ADVANCED FEATURES</a>: + <span id="advanced-features"> + <input type="checkbox" id="query-explain"><label for="query-explain"><span></span> EXPLAIN</label> + <input type="checkbox" name="analytic" value="true" id="query-analytic"><label for="query-analytic"><span></span> ANALYTIC</label> + <input type="checkbox" name="RTO" value="true" id="query-rto"><label for="query-rto"><span></span> RUNTIME QUERY OPTIMIZER</label> + </span> + <input type="submit" value="Execute" class="green"> + </div> + </div> - <hr class="shadow"> - - <div id="load-buttons"> - <input type="submit" value="Execute"> - <input type="reset" value="Clear"> - </div> </form> </div> @@ -116,18 +116,18 @@ <option>100</option> </select> per page <div id="page-selector"> - <button id="previous-page"><</button> + <button id="previous-page">◀</button> Page <input type="text" id="current-page"> of <span id="result-pages"></span> - <button id="next-page">></button> + <button id="next-page">▶</button> </div> </div> 
<div id="query-explanation" class="box"> </div> - <div class="bottom"> - <button id="query-export">Export</button> - <button id="query-response-clear">Clear</button> + <div id="query-export-container" class="controls-container"> + <button id="query-export" class="right orange">Export</button> + <button id="query-response-clear" class="right clear orange">Clear</button> </div> </div> @@ -207,6 +207,8 @@ </div> + <div class="clear"> </div> + </div> <div id="overlay"></div> Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-24 10:02:02 UTC (rev 8138) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-26 00:55:27 UTC (rev 8139) @@ -26,6 +26,7 @@ } var query = 'select ?s ?p ?o { ?o bds:search "' + term + '" . ?s ?p ?o . }' $('#query-box').val(query); + $('#query-errors').hide(); $('#query-form').submit(); showTab('query'); }); @@ -480,7 +481,7 @@ $('#query-response-clear').click(function() { $('#query-response, #query-explanation').empty(''); - $('#query-response, #query-pagination, #query-explanation, #query-tab .bottom *').hide(); + $('#query-response, #query-pagination, #query-explanation, #query-export-container').hide(); }); $('#query-export').click(function() { updateExportFileExtension(); showModal('query-export-modal'); }); @@ -611,7 +612,7 @@ function showQueryResults(data) { $('#query-response').empty(); $('#query-export-rdf').hide(); - $('#query-response, #query-pagination, #query-tab .bottom *').show(); + $('#query-response, #query-pagination, #query-export-container').show(); var table = $('<table>').appendTo($('#query-response')); if(this.dataTypes[1] == 'xml') { // RDF @@ -700,7 +701,7 @@ } function queryResultsError(jqXHR, textStatus, errorThrown) { - $('#query-response, #query-tab .bottom *').show(); + $('#query-response, #query-export-container').show(); $('#query-response').text('Error! ' + textStatus + ' ' + jqXHR.statusText); highlightError(jqXHR.statusText, 'query'); } @@ -812,7 +813,7 @@ } // update current results numbers - $('#current-results').html((start + 1) + ' - ' + end); + $('#current-results').html((start + 1) + '-' + end); $('#current-page').val(n); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-04-24 10:02:05
Revision: 8138 http://sourceforge.net/p/bigdata/code/8138 Author: dmekonnen Date: 2014-04-24 10:02:02 +0000 (Thu, 24 Apr 2014) Log Message: ----------- Adding "sudo" to README file and archiving initial brew work. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/brew/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/brew/bigdata.rb Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/brew/bigdata.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/brew/bigdata.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/brew/bigdata.rb 2014-04-24 10:02:02 UTC (rev 8138) @@ -0,0 +1,59 @@ +require 'formula' + +# Documentation: https://github.com/mxcl/homebrew/wiki/Formula-Cookbook +# /usr/local/Library/Contributions/example-formula.rb +# PLEASE REMOVE ALL GENERATED COMMENTS BEFORE SUBMITTING YOUR PULL REQUEST! + +class SystapBigdata < Formula + homepage 'http://bigdata.com/bigdata/blog/' + url 'http://iweb.dl.sourceforge.net/project/bigdata/bigdata/1.3.0/REL.bigdata-1.3.0.tgz' + sha1 '605e800386300a6965125e0e9bfc06a268df0f08' + + # depends_on 'cmake' => :build + # depends_on :java7 # if your formula requires any X11/XQuartz components + + def install + # Install the base files + prefix.install Dir['*'] + + # Setup the lib files + # (var+'lib/bigdata').mkpath + + + # Extract bigdata and install to sbin + + # sbin.install 'bigdata' + # (sbin/'bigdata').chmod 0755 + end + + def caveats; <<-EOS.undent + After launching, visit the Bigdata Workbench at: http://localhost:8080/bigdata + EOS + end + + + plist_options :manual => 'bigdata' + + def plist; <<-EOS.undent + <?xml version="1.0" encoding="UTF-8"?> + <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" + "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> + <plist version="1.0"> + <dict> + <key>Label</key> + <string>#{plist_name}</string> + <key>Program</key> + <string>#{opt_sbin}/bigdata</string> + <key>RunAtLoad</key> + <true/> + <key>EnvironmentVariables</key> + <dict> + <!-- need erl in the path --> + <key>PATH</key> + <string>/usr/local/sbin:/usr/bin:/bin:/usr/local/bin</string> + </dict> + </dict> + </plist> + EOS + end +end Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt 2014-04-22 16:22:14 UTC (rev 8137) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt 2014-04-24 10:02:02 UTC (rev 8138) @@ -46,11 +46,12 @@ ------------- The "Boto" python library for the AWS API must be installed in order to instantiate the cluster. If not already installed: - % pip install boto + % sudo pip install pycrypto + % sudo pip install boto alternately: - % easy_install boto + % sudo easy_install boto If while running the python scripts the error message appears "ImportError: No module named boto", you will need to set the This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-04-22 16:22:20
Revision: 8137 http://sourceforge.net/p/bigdata/code/8137 Author: thompsonbry Date: 2014-04-22 16:22:14 +0000 (Tue, 22 Apr 2014) Log Message: ----------- Added test for interrupted() for every 20 solutions processed by the ConditionalRoutingOp. Added test for interrupted() for each RDF Value tested by the RegexBOp. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java 2014-04-21 23:18:59 UTC (rev 8136) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java 2014-04-22 16:22:14 UTC (rev 8137) @@ -1,243 +1,251 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Aug 25, 2010 - */ - -package com.bigdata.bop.bset; - -import java.util.Arrays; -import java.util.Map; -import java.util.concurrent.Callable; -import java.util.concurrent.FutureTask; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.BOpContext; -import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.IConstraint; -import com.bigdata.bop.NV; -import com.bigdata.bop.PipelineOp; -import com.bigdata.bop.engine.BOpStats; -import com.bigdata.relation.accesspath.IBlockingBuffer; - -import cutthecrap.utils.striterators.ICloseableIterator; - -/** - * An operator for conditional routing of binding sets in a pipeline. The - * operator will copy binding sets either to the default sink (if a condition is - * satisfied) and otherwise to the alternate sink (iff one is specified). If a - * solution fails the constraint and the alternate sink is not specified, then - * the solution is dropped. - * <p> - * Conditional routing can be useful where a different data flow is required - * based on the type of an object (for example a term identifier versus an - * inline term in the RDF database) or where there is a need to jump around a - * join group based on some condition. - * <p> - * Conditional routing will cause reordering of solutions when the alternate - * sink is specified as some solutions will flow to the primary sink while - * others flow to the alterate sink. 
- * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id: ConditionalRoutingOp.java 7773 2014-01-11 12:49:05Z thompsonbry - * $ - */ -public class ConditionalRoutingOp extends PipelineOp { - - /** - * - */ - private static final long serialVersionUID = 1L; - - public interface Annotations extends PipelineOp.Annotations { - - /** - * An {@link IConstraint} which specifies the condition. When the - * condition is satisfied the binding set is routed to the default sink. - * When the condition is not satisfied, the binding set is routed to the - * alternative sink. - */ - String CONDITION = ConditionalRoutingOp.class.getName() + ".condition"; - - } - - /** - * Deep copy constructor. - * - * @param op - */ - public ConditionalRoutingOp(final ConditionalRoutingOp op) { - - super(op); - - } - - /** - * Shallow copy constructor. - * - * @param args - * @param annotations - */ - public ConditionalRoutingOp(final BOp[] args, - final Map<String, Object> annotations) { - - super(args, annotations); - - } - - public ConditionalRoutingOp(final BOp[] args, final NV... anns) { - - this(args, NV.asMap(anns)); - - } - - /** - * @see Annotations#CONDITION - */ - public IConstraint getCondition() { - - return (IConstraint) getProperty(Annotations.CONDITION); - - } - - @Override - public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { - - return new FutureTask<Void>(new ConditionalRouteTask(this, context)); - - } - - /** - * Copy the source to the sink or the alternative sink depending on the - * condition. - */ - static private class ConditionalRouteTask implements Callable<Void> { - - private final BOpStats stats; - - private final IConstraint condition; - - private final ICloseableIterator<IBindingSet[]> source; - - private final IBlockingBuffer<IBindingSet[]> sink; - - private final IBlockingBuffer<IBindingSet[]> sink2; - - ConditionalRouteTask(final ConditionalRoutingOp op, - final BOpContext<IBindingSet> context) { - - this.stats = context.getStats(); - - this.condition = op.getCondition(); - - if (condition == null) - throw new IllegalArgumentException(); - - this.source = context.getSource(); - - this.sink = context.getSink(); - - this.sink2 = context.getSink2(); // MAY be null. - -// if (sink2 == null) -// throw new IllegalArgumentException(); - - if (sink == sink2) - throw new IllegalArgumentException(); - - } - - @Override - public Void call() throws Exception { - try { - while (source.hasNext()) { - - final IBindingSet[] chunk = source.next(); - - stats.chunksIn.increment(); - stats.unitsIn.add(chunk.length); - - final IBindingSet[] def = new IBindingSet[chunk.length]; - final IBindingSet[] alt = sink2 == null ? null - : new IBindingSet[chunk.length]; - - int ndef = 0, nalt = 0; - - for (int i = 0; i < chunk.length; i++) { - - final IBindingSet bset = chunk[i].clone(); - - if (condition.accept(bset)) { - - // solution passes condition. default sink. - def[ndef++] = bset; - - } else if (sink2 != null) { - - // solution fails condition. alternative sink. 
- alt[nalt++] = bset; - - } - - } - - if (ndef > 0) { - if (ndef == def.length) - sink.add(def); - else - sink.add(Arrays.copyOf(def, ndef)); -// stats.chunksOut.increment(); -// stats.unitsOut.add(ndef); - } - - if (nalt > 0 && sink2 != null) { - if (nalt == alt.length) - sink2.add(alt); - else - sink2.add(Arrays.copyOf(alt, nalt)); -// stats.chunksOut.increment(); -// stats.unitsOut.add(nalt); - } - - } - - sink.flush(); - if (sink2 != null) - sink2.flush(); - - return null; - - } finally { - source.close(); - sink.close(); - if (sink2 != null) - sink2.close(); - - } - - } // call() - - } // ConditionalRoutingTask. - -} +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Aug 25, 2010 + */ + +package com.bigdata.bop.bset; + +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.FutureTask; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpContext; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstraint; +import com.bigdata.bop.NV; +import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.engine.BOpStats; +import com.bigdata.relation.accesspath.IBlockingBuffer; + +import cutthecrap.utils.striterators.ICloseableIterator; + +/** + * An operator for conditional routing of binding sets in a pipeline. The + * operator will copy binding sets either to the default sink (if a condition is + * satisfied) and otherwise to the alternate sink (iff one is specified). If a + * solution fails the constraint and the alternate sink is not specified, then + * the solution is dropped. + * <p> + * Conditional routing can be useful where a different data flow is required + * based on the type of an object (for example a term identifier versus an + * inline term in the RDF database) or where there is a need to jump around a + * join group based on some condition. + * <p> + * Conditional routing will cause reordering of solutions when the alternate + * sink is specified as some solutions will flow to the primary sink while + * others flow to the alterate sink. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: ConditionalRoutingOp.java 7773 2014-01-11 12:49:05Z thompsonbry + * $ + */ +public class ConditionalRoutingOp extends PipelineOp { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public interface Annotations extends PipelineOp.Annotations { + + /** + * An {@link IConstraint} which specifies the condition. When the + * condition is satisfied the binding set is routed to the default sink. + * When the condition is not satisfied, the binding set is routed to the + * alternative sink. + */ + String CONDITION = ConditionalRoutingOp.class.getName() + ".condition"; + + } + + /** + * Deep copy constructor. 
+ * + * @param op + */ + public ConditionalRoutingOp(final ConditionalRoutingOp op) { + + super(op); + + } + + /** + * Shallow copy constructor. + * + * @param args + * @param annotations + */ + public ConditionalRoutingOp(final BOp[] args, + final Map<String, Object> annotations) { + + super(args, annotations); + + } + + public ConditionalRoutingOp(final BOp[] args, final NV... anns) { + + this(args, NV.asMap(anns)); + + } + + /** + * @see Annotations#CONDITION + */ + public IConstraint getCondition() { + + return (IConstraint) getProperty(Annotations.CONDITION); + + } + + @Override + public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { + + return new FutureTask<Void>(new ConditionalRouteTask(this, context)); + + } + + /** + * Copy the source to the sink or the alternative sink depending on the + * condition. + */ + static private class ConditionalRouteTask implements Callable<Void> { + + private final BOpStats stats; + + private final IConstraint condition; + + private final ICloseableIterator<IBindingSet[]> source; + + private final IBlockingBuffer<IBindingSet[]> sink; + + private final IBlockingBuffer<IBindingSet[]> sink2; + + ConditionalRouteTask(final ConditionalRoutingOp op, + final BOpContext<IBindingSet> context) { + + this.stats = context.getStats(); + + this.condition = op.getCondition(); + + if (condition == null) + throw new IllegalArgumentException(); + + this.source = context.getSource(); + + this.sink = context.getSink(); + + this.sink2 = context.getSink2(); // MAY be null. + +// if (sink2 == null) +// throw new IllegalArgumentException(); + + if (sink == sink2) + throw new IllegalArgumentException(); + + } + + @Override + public Void call() throws Exception { + try { + while (source.hasNext()) { + + final IBindingSet[] chunk = source.next(); + + stats.chunksIn.increment(); + stats.unitsIn.add(chunk.length); + + final IBindingSet[] def = new IBindingSet[chunk.length]; + final IBindingSet[] alt = sink2 == null ? null + : new IBindingSet[chunk.length]; + + int ndef = 0, nalt = 0; + + for (int i = 0; i < chunk.length; i++) { + + if (i % 20 == 0 && Thread.interrupted()) { + + // Eagerly notice if the operator is interrupted. + throw new RuntimeException( + new InterruptedException()); + + } + + final IBindingSet bset = chunk[i].clone(); + + if (condition.accept(bset)) { + + // solution passes condition. default sink. + def[ndef++] = bset; + + } else if (sink2 != null) { + + // solution fails condition. alternative sink. + alt[nalt++] = bset; + + } + + } + + if (ndef > 0) { + if (ndef == def.length) + sink.add(def); + else + sink.add(Arrays.copyOf(def, ndef)); +// stats.chunksOut.increment(); +// stats.unitsOut.add(ndef); + } + + if (nalt > 0 && sink2 != null) { + if (nalt == alt.length) + sink2.add(alt); + else + sink2.add(Arrays.copyOf(alt, nalt)); +// stats.chunksOut.increment(); +// stats.unitsOut.add(nalt); + } + + } + + sink.flush(); + if (sink2 != null) + sink2.flush(); + + return null; + + } finally { + source.close(); + sink.close(); + if (sink2 != null) + sink2.close(); + + } + + } // call() + + } // ConditionalRoutingTask. 
+ +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java 2014-04-21 23:18:59 UTC (rev 8136) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java 2014-04-22 16:22:14 UTC (rev 8137) @@ -44,17 +44,17 @@ * SPARQL REGEX operator. */ public class RegexBOp extends XSDBooleanIVValueExpression - implements INeedsMaterialization { + implements INeedsMaterialization { /** - * - */ - private static final long serialVersionUID = 1357420268214930143L; - - private static final transient Logger log = Logger.getLogger(RegexBOp.class); + * + */ + private static final long serialVersionUID = 1357420268214930143L; + + private static final transient Logger log = Logger.getLogger(RegexBOp.class); public interface Annotations extends XSDBooleanIVValueExpression.Annotations { - + /** * The cached regex pattern. */ @@ -64,65 +64,65 @@ } private static Map<String,Object> anns( - final IValueExpression<? extends IV> pattern, - final IValueExpression<? extends IV> flags) { - - try { - - if (pattern instanceof IConstant && - (flags == null || flags instanceof IConstant)) { - - final IV parg = ((IConstant<IV>) pattern).get(); - - final IV farg = flags != null ? - ((IConstant<IV>) flags).get() : null; - - if (parg.hasValue() && (farg == null || farg.hasValue())) { - - final Value pargVal = parg.getValue(); - - final Value fargVal = farg != null ? farg.getValue() : null; - - return NV.asMap( - new NV(Annotations.PATTERN, - getPattern(pargVal, fargVal))); - - } - - } - - } catch (Exception ex) { - - if (log.isInfoEnabled()) { - log.info("could not create pattern for: " + pattern + ", " + flags); - } - - } - - return BOp.NOANNS; - + final IValueExpression<? extends IV> pattern, + final IValueExpression<? extends IV> flags) { + + try { + + if (pattern instanceof IConstant && + (flags == null || flags instanceof IConstant)) { + + final IV parg = ((IConstant<IV>) pattern).get(); + + final IV farg = flags != null ? + ((IConstant<IV>) flags).get() : null; + + if (parg.hasValue() && (farg == null || farg.hasValue())) { + + final Value pargVal = parg.getValue(); + + final Value fargVal = farg != null ? farg.getValue() : null; + + return NV.asMap( + new NV(Annotations.PATTERN, + getPattern(pargVal, fargVal))); + + } + + } + + } catch (Exception ex) { + + if (log.isInfoEnabled()) { + log.info("could not create pattern for: " + pattern + ", " + flags); + } + + } + + return BOp.NOANNS; + } - /** - * Construct a regex bop without flags. - */ + /** + * Construct a regex bop without flags. + */ @SuppressWarnings("rawtypes") - public RegexBOp( - final IValueExpression<? extends IV> var, - final IValueExpression<? extends IV> pattern) { + public RegexBOp( + final IValueExpression<? extends IV> var, + final IValueExpression<? extends IV> pattern) { this(new BOp[] { var, pattern }, anns(pattern, null)); } - /** - * Construct a regex bop with flags. - */ - @SuppressWarnings("rawtypes") + /** + * Construct a regex bop with flags. + */ + @SuppressWarnings("rawtypes") public RegexBOp( - final IValueExpression<? extends IV> var, - final IValueExpression<? extends IV> pattern, - final IValueExpression<? extends IV> flags) { + final IValueExpression<? extends IV> var, + final IValueExpression<? extends IV> pattern, + final IValueExpression<? 
extends IV> flags) { this(new BOp[] { var, pattern, flags }, anns(pattern, flags)); @@ -133,8 +133,8 @@ */ public RegexBOp(final BOp[] args, final Map<String, Object> anns) { - super(args, anns); - + super(args, anns); + if (args.length < 2 || args[0] == null || args[1] == null) throw new IllegalArgumentException(); @@ -146,33 +146,34 @@ public RegexBOp(final RegexBOp op) { super(op); } - + + @Override public Requirement getRequirement() { - - return INeedsMaterialization.Requirement.SOMETIMES; - + + return INeedsMaterialization.Requirement.SOMETIMES; + } - + + @Override public boolean accept(final IBindingSet bs) { - - @SuppressWarnings("rawtypes") + final Value var = asValue(getAndCheckBound(0, bs)); - + @SuppressWarnings("rawtypes") final IV pattern = getAndCheckBound(1, bs); @SuppressWarnings("rawtypes") final IV flags = arity() > 2 ? get(2).get(bs) : null; - + if (log.isDebugEnabled()) { - log.debug("regex var: " + var); - log.debug("regex pattern: " + pattern); - log.debug("regex flags: " + flags); + log.debug("regex var: " + var); + log.debug("regex pattern: " + pattern); + log.debug("regex flags: " + flags); } - - return accept(var, pattern.getValue(), - flags != null ? flags.getValue() : null); + return accept(var, pattern.getValue(), flags != null ? flags.getValue() + : null); + } /** @@ -185,67 +186,87 @@ * REGEXBOp should cache the Pattern when it is a constant </a> */ private boolean accept(final Value arg, final Value parg, final Value farg) { - + if (log.isDebugEnabled()) { - log.debug("regex var: " + arg); - log.debug("regex pattern: " + parg); - log.debug("regex flags: " + farg); + log.debug("regex var: " + arg); + log.debug("regex pattern: " + parg); + log.debug("regex flags: " + farg); } - + if (QueryEvaluationUtil.isSimpleLiteral(arg)) { - + final String text = ((Literal) arg).getLabel(); - + try { - - // first check for cached pattern - Pattern pattern = (Pattern) getProperty(Annotations.PATTERN); - if (pattern == null) { - pattern = getPattern(parg, farg); - } + + // first check for cached pattern + Pattern pattern = (Pattern) getProperty(Annotations.PATTERN); + + if (pattern == null) { + + // resolve the pattern. NB: NOT cached. + pattern = getPattern(parg, farg); + + } + + if (Thread.interrupted()) { + + /* + * Eagerly notice if the operator is interrupted. + * + * Note: Regex can be a high latency operation for a large + * RDF Literal. Therefore we want to check for an interrupt + * before each regex test. The Pattern code itself will not + * notice an interrupt.... 
+ */ + throw new RuntimeException(new InterruptedException()); + + } + final boolean result = pattern.matcher(text).find(); + return result; - + } catch (IllegalArgumentException ex) { - - throw new SparqlTypeErrorException(); - + + throw new SparqlTypeErrorException(); + } - + } else { - - throw new SparqlTypeErrorException(); - + + throw new SparqlTypeErrorException(); + } - + } - private static Pattern getPattern(final Value parg, final Value farg) - throws IllegalArgumentException { - + private static Pattern getPattern(final Value parg, final Value farg) + throws IllegalArgumentException { + if (log.isDebugEnabled()) { - log.debug("regex pattern: " + parg); - log.debug("regex flags: " + farg); + log.debug("regex pattern: " + parg); + log.debug("regex flags: " + farg); } if (QueryEvaluationUtil.isSimpleLiteral(parg) && (farg == null || QueryEvaluationUtil.isSimpleLiteral(farg))) { final String ptn = ((Literal) parg).getLabel(); - String flags = ""; - if (farg != null) { - flags = ((Literal)farg).getLabel(); - } - int f = 0; - for (char c : flags.toCharArray()) { - switch (c) { - case 's': - f |= Pattern.DOTALL; - break; - case 'm': - f |= Pattern.MULTILINE; - break; - case 'i': { + String flags = ""; + if (farg != null) { + flags = ((Literal)farg).getLabel(); + } + int f = 0; + for (char c : flags.toCharArray()) { + switch (c) { + case 's': + f |= Pattern.DOTALL; + break; + case 'm': + f |= Pattern.MULTILINE; + break; + case 'i': { /* * The SPARQL REGEX operator is based on the XQuery REGEX * operator. That operator should be Unicode clean by @@ -257,29 +278,29 @@ * > SPARQL REGEX operator does not perform case-folding * correctly for Unicode data </a> */ - f |= Pattern.CASE_INSENSITIVE; + f |= Pattern.CASE_INSENSITIVE; f |= Pattern.UNICODE_CASE; - break; - } - case 'x': - f |= Pattern.COMMENTS; - break; - case 'd': - f |= Pattern.UNIX_LINES; - break; - case 'u': // Implicit with 'i' flag. -// f |= Pattern.UNICODE_CASE; - break; - default: - throw new IllegalArgumentException(); - } - } + break; + } + case 'x': + f |= Pattern.COMMENTS; + break; + case 'd': + f |= Pattern.UNIX_LINES; + break; + case 'u': // Implicit with 'i' flag. +// f |= Pattern.UNICODE_CASE; + break; + default: + throw new IllegalArgumentException(); + } + } final Pattern pattern = Pattern.compile(ptn, f); return pattern; } - - throw new IllegalArgumentException(); - + + throw new IllegalArgumentException(); + } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-04-21 23:19:02
Revision: 8136 http://sourceforge.net/p/bigdata/code/8136 Author: tobycraig Date: 2014-04-21 23:18:59 +0000 (Mon, 21 Apr 2014) Log Message: ----------- Added error highlighting for SPARQL update in load pane Modified Paths: -------------- branches/RDR/bigdata-war/src/html/css/style.css branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/css/style.css =================================================================== --- branches/RDR/bigdata-war/src/html/css/style.css 2014-04-21 22:08:01 UTC (rev 8135) +++ branches/RDR/bigdata-war/src/html/css/style.css 2014-04-21 23:18:59 UTC (rev 8136) @@ -228,20 +228,20 @@ float: right; } -#query-box { +#load-box, #query-box { background-color: transparent; padding: 2px; border-width: 1px; } /* these should have the same typography so the error highlighting matches up with the query text */ -#query-box, #query-errors { +#load-box, #load-errors, #query-box, #query-errors { font-family: sans-serif; font-size: 90%; line-height: normal; } -#query-errors { +#load-errors, #query-errors { position: absolute; z-index: -1; padding: 8px 3px; @@ -249,11 +249,11 @@ white-space: pre; } -#error-line { +.error-line { background-color: lightgreen; } -#error-character { +.error-character { background-color: red; } Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-21 22:08:01 UTC (rev 8135) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-21 23:18:59 UTC (rev 8136) @@ -36,7 +36,10 @@ <div class="namespace-shortcuts"> </div> - <textarea id="load-box" placeholder="(Type in or drag a file containing RDF data, a SPARQL update or a file path or URL)"></textarea> + <div> + <div id="load-errors"></div> + <textarea id="load-box" placeholder="(Type in or drag a file containing RDF data, a SPARQL update or a file path or URL)"></textarea> + </div> <p id="large-file-message">Your file <span id="filename"></span> is too large to display here, but will be uploaded as normal. <a href="#" id="clear-file">Remove file</a></p> <p> <input type="file" id="load-file"><br> Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-21 22:08:01 UTC (rev 8135) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-21 23:18:59 UTC (rev 8136) @@ -367,6 +367,7 @@ $('#load-box').on('dragover', handleDragOver) .on('drop', handleFile) .on('paste', handlePaste) + .on('input propertychange', function() { $('#load-errors').hide(); }) .bind('keydown', 'ctrl+return', submitLoad); $('#clear-file').click(clearFile); @@ -436,6 +437,7 @@ function updateResponseError(jqXHR, textStatus, errorThrown) { $('#load-response, #load-clear').show(); $('#load-response pre').text('Error! ' + textStatus + ' ' + jqXHR.statusText); + highlightError(jqXHR.statusText, 'load'); } @@ -700,24 +702,29 @@ function queryResultsError(jqXHR, textStatus, errorThrown) { $('#query-response, #query-tab .bottom *').show(); $('#query-response').text('Error! 
' + textStatus + ' ' + jqXHR.statusText); - var match = errorThrown.match(/line (\d+), column (\d+)/); + highlightError(jqXHR.statusText, 'query'); +} + +function highlightError(description, pane) { + var match = description.match(/line (\d+), column (\d+)/); if(match) { // highlight character at error position var line = match[1] - 1; var column = match[2] - 1; - var query = $('#query-box').val(); - var lines = query.split('\n'); - $('#query-errors').html(''); + var input = $('#' + pane + '-box').val(); + var lines = input.split('\n'); + var container = '#' + pane + '-errors'; + $(container).html(''); for(var i=0; i<line; i++) { var p = $('<p>').text(lines[i]); - $('#query-errors').append(p); + $(container).append(p); } - $('#query-errors').append('<p id="error-line">'); - $('#error-line').append(document.createTextNode(lines[line].substr(0, column))); - $('#error-line').append($('<span id="error-character">').text(lines[line].charAt(column) || ' ')); - $('#error-line').append(document.createTextNode(lines[line].substr(column + 1))); - $('#query-errors').show(); - $('#query-box').scrollTop(0); + $(container).append('<p class="error-line">'); + $(container + ' .error-line').append(document.createTextNode(lines[line].substr(0, column))); + $(container + ' .error-line').append($('<span class="error-character">').text(lines[line].charAt(column) || ' ')); + $(container + ' .error-line').append(document.createTextNode(lines[line].substr(column + 1))); + $(container).show(); + $('#' + pane + '-box').scrollTop(0); } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-04-21 22:08:05
Revision: 8135 http://sourceforge.net/p/bigdata/code/8135 Author: tobycraig Date: 2014-04-21 22:08:01 +0000 (Mon, 21 Apr 2014) Log Message: ----------- Rudimentary error position highlighting in query, needs some polishing Modified Paths: -------------- branches/RDR/bigdata-war/src/html/css/style.css branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/css/style.css =================================================================== --- branches/RDR/bigdata-war/src/html/css/style.css 2014-04-21 21:04:52 UTC (rev 8134) +++ branches/RDR/bigdata-war/src/html/css/style.css 2014-04-21 22:08:01 UTC (rev 8135) @@ -228,6 +228,35 @@ float: right; } +#query-box { + background-color: transparent; + padding: 2px; + border-width: 1px; +} + +/* these should have the same typography so the error highlighting matches up with the query text */ +#query-box, #query-errors { + font-family: sans-serif; + font-size: 90%; + line-height: normal; +} + +#query-errors { + position: absolute; + z-index: -1; + padding: 8px 3px; + color: transparent; + white-space: pre; +} + +#error-line { + background-color: lightgreen; +} + +#error-character { + background-color: red; +} + #running-queries li { margin: 10px 0; } Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-21 21:04:52 UTC (rev 8134) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-21 22:08:01 UTC (rev 8135) @@ -80,6 +80,7 @@ </div> <form id="query-form"> + <div id="query-errors"></div> <textarea id="query-box" name="query" placeholder="(Input a SPARQL query)"></textarea> <a href="#" id="advanced-features-toggle">Advanced features</a> Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-21 21:04:52 UTC (rev 8134) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-21 22:08:01 UTC (rev 8135) @@ -441,7 +441,8 @@ /* Query */ -$('#query-box').bind('keydown', 'ctrl+return', function(e) { e.preventDefault(); $('#query-form').submit(); }); +$('#query-box').bind('keydown', 'ctrl+return', function(e) { e.preventDefault(); $('#query-form').submit(); }) + .on('input propertychange', function() { $('#query-errors').hide(); }); $('#query-form').submit(submitQuery); function submitQuery(e) { @@ -699,6 +700,25 @@ function queryResultsError(jqXHR, textStatus, errorThrown) { $('#query-response, #query-tab .bottom *').show(); $('#query-response').text('Error! 
' + textStatus + ' ' + jqXHR.statusText); + var match = errorThrown.match(/line (\d+), column (\d+)/); + if(match) { + // highlight character at error position + var line = match[1] - 1; + var column = match[2] - 1; + var query = $('#query-box').val(); + var lines = query.split('\n'); + $('#query-errors').html(''); + for(var i=0; i<line; i++) { + var p = $('<p>').text(lines[i]); + $('#query-errors').append(p); + } + $('#query-errors').append('<p id="error-line">'); + $('#error-line').append(document.createTextNode(lines[line].substr(0, column))); + $('#error-line').append($('<span id="error-character">').text(lines[line].charAt(column) || ' ')); + $('#error-line').append(document.createTextNode(lines[line].substr(column + 1))); + $('#query-errors').show(); + $('#query-box').scrollTop(0); + } } /* Pagination */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |