This list is closed; nobody may subscribe to it.
2010 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
(139) |
Aug
(94) |
Sep
(232) |
Oct
(143) |
Nov
(138) |
Dec
(55) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
2011 |
Jan
(127) |
Feb
(90) |
Mar
(101) |
Apr
(74) |
May
(148) |
Jun
(241) |
Jul
(169) |
Aug
(121) |
Sep
(157) |
Oct
(199) |
Nov
(281) |
Dec
(75) |
2012 |
Jan
(107) |
Feb
(122) |
Mar
(184) |
Apr
(73) |
May
(14) |
Jun
(49) |
Jul
(26) |
Aug
(103) |
Sep
(133) |
Oct
(61) |
Nov
(51) |
Dec
(55) |
2013 |
Jan
(59) |
Feb
(72) |
Mar
(99) |
Apr
(62) |
May
(92) |
Jun
(19) |
Jul
(31) |
Aug
(138) |
Sep
(47) |
Oct
(83) |
Nov
(95) |
Dec
(111) |
2014 |
Jan
(125) |
Feb
(60) |
Mar
(119) |
Apr
(136) |
May
(270) |
Jun
(83) |
Jul
(88) |
Aug
(30) |
Sep
(47) |
Oct
(27) |
Nov
(23) |
Dec
|
2015 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(3) |
Oct
|
Nov
|
Dec
|
2016 |
Jan
|
Feb
|
Mar
(4) |
Apr
(1) |
May
|
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
From: <mar...@us...> - 2013-11-14 08:24:41
|
Revision: 7551 http://bigdata.svn.sourceforge.net/bigdata/?rev=7551&view=rev Author: martyncutcher Date: 2013-11-14 08:24:35 +0000 (Thu, 14 Nov 2013) Log Message: ----------- Fix invalid assertion in syncMetaTransients Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-11-14 00:49:09 UTC (rev 7550) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-11-14 08:24:35 UTC (rev 7551) @@ -1486,11 +1486,9 @@ * metaBits to the metaTransientBits, which will be faster. */ private void syncMetaTransients() { - if (m_metaTransientBits == null) { + if (m_metaTransientBits == null || m_metaTransientBits.length != m_metaBits.length) { m_metaTransientBits = (int[]) m_metaBits.clone(); } else { - assert m_metaTransientBits.length == m_metaBits.length; - System.arraycopy(m_metaBits, 0, m_metaTransientBits, 0, m_metaTransientBits.length); } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-14 00:49:16
|
Revision: 7550 http://bigdata.svn.sourceforge.net/bigdata/?rev=7550&view=rev Author: thompsonbry Date: 2013-11-14 00:49:09 +0000 (Thu, 14 Nov 2013) Log Message: ----------- reduced log level from ERROR to INFO Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestName2Addr.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestName2Addr.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestName2Addr.java 2013-11-13 21:53:56 UTC (rev 7549) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestName2Addr.java 2013-11-14 00:49:09 UTC (rev 7550) @@ -122,7 +122,8 @@ assertTrue(BytesUtil.compareBytes(a, b) == 0); - log.error("name=" + name + ", key=" + BytesUtil.toString(a)); + if (log.isInfoEnabled()) + log.info("name=" + name + ", key=" + BytesUtil.toString(a)); } @@ -335,9 +336,10 @@ private static Set<String> getIndexNames(final Journal jnl, final String prefix, final long timestamp) { - log.error("prefix=" + prefix + ", timestamp=" - + TimestampUtility.toString(timestamp)); - + if (log.isInfoEnabled()) + log.info("prefix=" + prefix + ", timestamp=" + + TimestampUtility.toString(timestamp)); + final Set<String> names = new LinkedHashSet<String>(); final Iterator<String> itr = jnl.indexNameScan(prefix, timestamp); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 21:54:03
|
Revision: 7549 http://bigdata.svn.sourceforge.net/bigdata/?rev=7549&view=rev Author: thompsonbry Date: 2013-11-13 21:53:56 +0000 (Wed, 13 Nov 2013) Log Message: ----------- made checkDeadline() public Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2013-11-13 21:50:23 UTC (rev 7548) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2013-11-13 21:53:56 UTC (rev 7549) @@ -349,7 +349,7 @@ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772"> * Query timeout only checked at operator start/stop. </a> */ - final protected void checkDeadline() { + final public void checkDeadline() { try { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 21:50:30
|
Revision: 7548 http://bigdata.svn.sourceforge.net/bigdata/?rev=7548&view=rev Author: thompsonbry Date: 2013-11-13 21:50:23 +0000 (Wed, 13 Nov 2013) Log Message: ----------- Added @Override annotations and final attributes. Added checkDeadline() method on AbstractRunningQuery. This performs a non-blocking test of the RunState to determine whether a deadline (if one exists) has expired. If so, it halts the query. checkDeadline() is intended to provide a hook that can be used to force timely termination of queries that miss their deadline and do not terminate because some operator is compute bound. The current logic only check the deadline in startOp() and haltOp(). We will have to add additional logic to call checkDeadline() at other times, e.g., from a scheduled executor task, in order to ensure timely termination. See #772. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/RunState.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2013-11-13 17:42:46 UTC (rev 7547) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java 2013-11-13 21:50:23 UTC (rev 7548) @@ -340,24 +340,56 @@ } + /** + * If the query deadline has expired, then halt the query. + * + * @throws QueryTimeoutException + * if the query deadline has expired. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772"> + * Query timeout only checked at operator start/stop. 
</a> + */ + final protected void checkDeadline() { + + try { + + runState.checkDeadline(); + + } catch (QueryTimeoutException ex) { + + halt(ex); + + /* + * Note: The exception is not rethrown when the query halts for a + * deadline. See startOp() and haltOp() for the standard behavior. + */ + + } + + } + + @Override final public long getDeadline() { return runState.getDeadline(); } + @Override final public long getStartTime() { return startTime.get(); } + @Override final public long getDoneTime() { return doneTime.get(); } + @Override final public long getElapsed() { long mark = doneTime.get(); @@ -379,37 +411,28 @@ } + @Override public QueryEngine getQueryEngine() { return queryEngine; } - /** - * The client executing this query (aka the query controller). - * <p> - * Note: The proxy is primarily for light weight RMI messages used to - * coordinate the distributed query evaluation. Ideally, all large objects - * will be transfered among the nodes of the cluster using NIO buffers. - */ + @Override final public IQueryClient getQueryController() { return clientProxy; } - /** - * The unique identifier for this query. - */ + @Override final public UUID getQueryId() { return queryId; } - /** - * Return the operator tree for this query. - */ + @Override final public PipelineOp getQuery() { return query; @@ -425,6 +448,7 @@ } + @Override final public Map<Integer/* bopId */, BOpStats> getStats() { return Collections.unmodifiableMap(statsMap); @@ -744,6 +768,11 @@ halt(ex); + /* + * Note: The exception is not rethrown when the query halts for a + * deadline. + */ + } finally { lock.unlock(); @@ -830,6 +859,11 @@ halt(t); + /* + * Note: The exception is not rethrown when the query halts for a + * deadline. 
+ */ + } finally { lock.unlock(); @@ -1149,6 +1183,7 @@ */ abstract protected void consumeChunk(); + @Override final public ICloseableIterator<IBindingSet[]> iterator() { if (!controller) @@ -1161,6 +1196,7 @@ } + @Override final public void halt(final Void v) { lock.lock(); @@ -1181,6 +1217,7 @@ } + @Override final public <T extends Throwable> T halt(final T t) { if (t == null) @@ -1223,6 +1260,7 @@ * consume them.</li> * </ul> */ + @Override final public boolean cancel(final boolean mayInterruptIfRunning) { /* * Set if we notice an interrupt during clean up of the query and then @@ -1397,43 +1435,50 @@ } + @Override final public Void get() throws InterruptedException, ExecutionException { return future.get(); } - final public Void get(long arg0, TimeUnit arg1) + @Override + final public Void get(final long arg0, final TimeUnit arg1) throws InterruptedException, ExecutionException, TimeoutException { return future.get(arg0, arg1); } + @Override final public boolean isCancelled() { return future.isCancelled(); } + @Override final public boolean isDone() { return future.isDone(); } + @Override final public Throwable getCause() { return future.getCause(); } + @Override public IBigdataFederation<?> getFederation() { return queryEngine.getFederation(); } + @Override public IIndexManager getLocalIndexManager() { return queryEngine.getIndexManager(); @@ -1526,6 +1571,7 @@ * buffered on the native heap) rather than as a limit to the among of * native memory the operator may use while it is running. 
*/ + @Override public IMemoryManager getMemoryManager() { IMemoryManager memoryManager = this.memoryManager.get(); if (memoryManager == null) { @@ -1545,6 +1591,7 @@ private final AtomicReference<IMemoryManager> memoryManager = new AtomicReference<IMemoryManager>(); + @Override final public IQueryAttributes getAttributes() { return queryAttributes; @@ -1618,6 +1665,7 @@ } } + @Override public String toString() { final StringBuilder sb = new StringBuilder(getClass().getName()); sb.append("{queryId=" + queryId); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2013-11-13 17:42:46 UTC (rev 7547) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2013-11-13 21:50:23 UTC (rev 7548) @@ -85,9 +85,13 @@ /** * The client coordinate the evaluation of this query (aka the query * controller). For a standalone database, this will be the - * {@link QueryEngine}. For scale-out, this will be the RMI proxy for the - * {@link QueryEngine} instance to which the query was submitted for - * evaluation by the application. + * {@link QueryEngine}. + * <p> + * For scale-out, this will be the RMI proxy for the {@link QueryEngine} + * instance to which the query was submitted for evaluation by the + * application. The proxy is primarily for light weight RMI messages used to + * coordinate the distributed query evaluation. Ideally, all large objects + * will be transfered among the nodes of the cluster using NIO buffers. 
*/ IQueryClient getQueryController(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2013-11-13 17:42:46 UTC (rev 7547) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2013-11-13 21:50:23 UTC (rev 7548) @@ -722,6 +722,7 @@ } + @Override public void run() { if(log.isInfoEnabled()) log.info("Running: " + this); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/RunState.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2013-11-13 17:42:46 UTC (rev 7547) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2013-11-13 21:50:23 UTC (rev 7548) @@ -606,8 +606,7 @@ if (innerState.allDone.get()) throw new IllegalStateException(ERR_QUERY_HALTED); - if (innerState.deadline.get() < System.currentTimeMillis()) - throw new QueryTimeoutException(ERR_DEADLINE); + checkDeadline(); if (!innerState.started.compareAndSet(false/* expect */, true/* update */)) throw new IllegalStateException(ERR_QUERY_STARTED); @@ -704,11 +703,9 @@ if (innerState.allDone.get()) throw new IllegalStateException(ERR_QUERY_HALTED); -// + " bopId="+msg.bopId+" : msg="+msg); - if (innerState.deadline.get() < System.currentTimeMillis()) - throw new QueryTimeoutException(ERR_DEADLINE); - + checkDeadline(); + innerState.stepCount.incrementAndGet(); final boolean firstTime = _startOp(msg); @@ -777,6 +774,19 @@ } // RunStateEnum /** + * Check the query to see whether its deadline has expired. + * + * @throws QueryTimeoutException + * if the query deadline has expired. 
+ */ + protected void checkDeadline() throws QueryTimeoutException { + + if (innerState.deadline.get() < System.currentTimeMillis()) + throw new QueryTimeoutException(ERR_DEADLINE); + + } + + /** * Update the {@link RunState} to reflect the post-condition of the * evaluation of an operator against one or more {@link IChunkMessage}s, * adjusting the #of messages available for consumption by the operator @@ -809,8 +819,7 @@ if (innerState.allDone.get()) throw new IllegalStateException(ERR_QUERY_HALTED); - if (innerState.deadline.get() < System.currentTimeMillis()) - throw new QueryTimeoutException(ERR_DEADLINE); + checkDeadline(); innerState.stepCount.incrementAndGet(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 17:42:52
|
Revision: 7547 http://bigdata.svn.sourceforge.net/bigdata/?rev=7547&view=rev Author: thompsonbry Date: 2013-11-13 17:42:46 +0000 (Wed, 13 Nov 2013) Log Message: ----------- javadoc (removed unused parameter) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2013-11-13 17:41:08 UTC (rev 7546) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2013-11-13 17:42:46 UTC (rev 7547) @@ -1756,9 +1756,6 @@ * This method currently will expose a mutable connection to any * registered {@link CustomServiceFactory}. * - * @param conn - * The connection. - * * @return The connection. * * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/754"> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 17:41:14
|
Revision: 7546 http://bigdata.svn.sourceforge.net/bigdata/?rev=7546&view=rev Author: thompsonbry Date: 2013-11-13 17:41:08 +0000 (Wed, 13 Nov 2013) Log Message: ----------- javadoc edit (parameter name). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/BigdataLoader.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/BigdataLoader.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/BigdataLoader.java 2013-11-13 17:37:13 UTC (rev 7545) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/BigdataLoader.java 2013-11-13 17:41:08 UTC (rev 7546) @@ -133,16 +133,16 @@ /** * Load a data file into a SAIL via the Sesame Repository API. * - * @param sail - * the SAIL + * @param repo + * the repository. * @param data * path to the data (assumes ntriples) * * @todo This is not an efficient API for loading the data. use the * {@link DataLoader} instead. */ - private static final void loadData(BigdataSailRepository repo, String data) - throws Exception { + private static final void loadData(final BigdataSailRepository repo, + final String data) throws Exception { RepositoryConnection cxn = null; try { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 17:37:19
|
Revision: 7545 http://bigdata.svn.sourceforge.net/bigdata/?rev=7545&view=rev Author: thompsonbry Date: 2013-11-13 17:37:13 +0000 (Wed, 13 Nov 2013) Log Message: ----------- Override annotations. made some methods static. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2013-11-13 17:34:59 UTC (rev 7544) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2013-11-13 17:37:13 UTC (rev 7545) @@ -36,7 +36,6 @@ import org.openrdf.model.Statement; import org.openrdf.model.URI; import org.openrdf.model.Value; -import org.openrdf.model.impl.URIImpl; import org.openrdf.rio.RDFFormat; import org.openrdf.rio.RDFHandlerException; import org.openrdf.rio.RDFParser; @@ -414,6 +413,7 @@ } } + @Override public void handleStatement(final Statement stmt) throws RDFHandlerException { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2013-11-13 17:34:59 UTC (rev 7544) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2013-11-13 17:37:13 UTC (rev 7545) @@ -546,6 +546,7 @@ } } + @Override public 
void handleStatement(final Statement stmt) throws RDFHandlerException { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2013-11-13 17:34:59 UTC (rev 7544) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2013-11-13 17:37:13 UTC (rev 7545) @@ -658,17 +658,19 @@ * linked data GET by turning it into a SPARQL DESCRIBE query. * @throws IOException */ - private String getQueryString(final HttpServletRequest req) throws IOException { - if (RESTServlet.hasMimeType(req, MIME_SPARQL_QUERY)) { - // return the body of the POST, see trac 711 - return readFully( req.getReader() ); - } - return req.getParameter(ATTR_QUERY) != null ? req + static private String getQueryString(final HttpServletRequest req) + throws IOException { + if (RESTServlet.hasMimeType(req, MIME_SPARQL_QUERY)) { + // return the body of the POST, see trac 711 + return readFully(req.getReader()); + } + return req.getParameter(ATTR_QUERY) != null ? req .getParameter(ATTR_QUERY) : (String) req .getAttribute(ATTR_QUERY); - } - - private String getUpdateString(final HttpServletRequest req) throws IOException { + } + + static private String getUpdateString(final HttpServletRequest req) + throws IOException { if (RESTServlet.hasMimeType(req, MIME_SPARQL_UPDATE)) { // return the body of the POST, see trac 711 return readFully( req.getReader() ); @@ -676,9 +678,10 @@ return req.getParameter(ATTR_UPDATE); } - static String readFully(Reader reader) throws IOException { - char[] arr = new char[8*1024]; // 8K at a time - StringBuffer buf = new StringBuffer(); + // Note: Referenced by the test suite. Should be moved to utility class. 
+ static String readFully(final Reader reader) throws IOException { + final char[] arr = new char[8*1024]; // 8K at a time + final StringBuffer buf = new StringBuffer(); int numChars; while ((numChars = reader.read(arr, 0, arr.length)) > 0) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java 2013-11-13 17:34:59 UTC (rev 7544) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java 2013-11-13 17:37:13 UTC (rev 7545) @@ -355,7 +355,7 @@ if (ServletFileUpload.isMultipartContent(req)) { - doUpdateWithBody(req, resp); + doUpdateWithBody(req, resp); } else { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2013-11-13 17:35:05
|
Revision: 7544 http://bigdata.svn.sourceforge.net/bigdata/?rev=7544&view=rev Author: martyncutcher Date: 2013-11-13 17:34:59 +0000 (Wed, 13 Nov 2013) Log Message: ----------- Add standard banner Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java 2013-11-13 17:33:49 UTC (rev 7543) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java 2013-11-13 17:34:59 UTC (rev 7544) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; import java.util.concurrent.TimeUnit; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2013-11-13 17:33:56
|
Revision: 7543 http://bigdata.svn.sourceforge.net/bigdata/?rev=7543&view=rev Author: martyncutcher Date: 2013-11-13 17:33:49 +0000 (Wed, 13 Nov 2013) Log Message: ----------- Adds ChangeLeader test to force a simple change of quorum leadership that demonstrated the failure mode first observed with #738 and now fixed. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2013-11-13 17:23:29 UTC (rev 7542) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2013-11-13 17:33:49 UTC (rev 7543) @@ -100,6 +100,9 @@ // HA3 test suite in which normal HALog retention rules apply. suite.addTestSuite(TestHA3JournalServerWithHALogs.class); + // HA3 test suite focusing on changing the leader. + suite.addTestSuite(TestHA3ChangeLeader.class); + // HA3 snapshot policy test suite. 
suite.addTestSuite(TestHA3SnapshotPolicy.class); suite.addTestSuite(TestHA3SnapshotPolicy2.class); Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java 2013-11-13 17:33:49 UTC (rev 7543) @@ -0,0 +1,48 @@ +package com.bigdata.journal.jini.ha; + +import java.util.concurrent.TimeUnit; + +import com.bigdata.ha.HAGlue; + +public class TestHA3ChangeLeader extends AbstractHA3JournalServerTestCase { + + /** + * We have seen problems with updates when the leader changes, this test reconstructs + * this simple scenario, with and update transaction, change of leader and then a + * second update transaction. + * + * @throws Exception + */ + public void testStartABC_ChangeLeader() throws Exception { + + // Start 3 services + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + final HAGlue serverC = startC(); + + // Wait for a quorum meet. + final long token1 = awaitFullyMetQuorum(); + + // await pipeline + awaitPipeline(20, TimeUnit.SECONDS, new HAGlue[] { serverA, serverB, + serverC }); + + awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); + + /* + * Now go through a commit point with a met quorum. The HALog + * files should be retained at that commit point. + */ + simpleTransaction(); + + shutdownA(); + + final long token2 = awaitNextQuorumMeet(token1); + + simpleTransaction(); + + // And again verify binary equality of ALL journals. + assertDigestsEquals(new HAGlue[] { serverB, serverC }); + + } +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 17:23:35
|
Revision: 7542 http://bigdata.svn.sourceforge.net/bigdata/?rev=7542&view=rev Author: thompsonbry Date: 2013-11-13 17:23:29 +0000 (Wed, 13 Nov 2013) Log Message: ----------- cleanup in HAJournal-X.config and jiniClient.config files for HA CI (jini and zookeeper stuff that was commented out was removed as irrelevant to CI). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/jiniClient.config Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2013-11-13 17:22:34 UTC (rev 7541) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2013-11-13 17:23:29 UTC (rev 7542) @@ -98,10 +98,6 @@ // runs jini on the localhost using unicast locators. new LookupLocator("jini://localhost/") - - // runs jini on one or more hosts using unicast locators. - //new LookupLocator("jini://"+jini1), - //new LookupLocator("jini://"+jini2), }; @@ -174,11 +170,6 @@ */ // standalone. servers = "localhost:2081"; - // ensemble -// servers = bigdata.zoo1+":2181" -// + ","+bigdata.zoo2+":2181" -// + ","+bigdata.zoo3+":2181" -// ; /* Session timeout (optional). 
*/ sessionTimeout = bigdata.sessionTimeout; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2013-11-13 17:22:34 UTC (rev 7541) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2013-11-13 17:23:29 UTC (rev 7542) @@ -98,10 +98,6 @@ // runs jini on the localhost using unicast locators. new LookupLocator("jini://localhost/") - - // runs jini on one or more hosts using unicast locators. - //new LookupLocator("jini://"+jini1), - //new LookupLocator("jini://"+jini2), }; @@ -174,11 +170,6 @@ */ // standalone. servers = "localhost:2081"; - // ensemble -// servers = bigdata.zoo1+":2181" -// + ","+bigdata.zoo2+":2181" -// + ","+bigdata.zoo3+":2181" -// ; /* Session timeout (optional). */ sessionTimeout = bigdata.sessionTimeout; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2013-11-13 17:22:34 UTC (rev 7541) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2013-11-13 17:23:29 UTC (rev 7542) @@ -98,10 +98,6 @@ // runs jini on the localhost using unicast locators. new LookupLocator("jini://localhost/") - - // runs jini on one or more hosts using unicast locators. - //new LookupLocator("jini://"+jini1), - //new LookupLocator("jini://"+jini2), }; @@ -174,11 +170,6 @@ */ // standalone. servers = "localhost:2081"; - // ensemble -// servers = bigdata.zoo1+":2181" -// + ","+bigdata.zoo2+":2181" -// + ","+bigdata.zoo3+":2181" -// ; /* Session timeout (optional). 
*/ sessionTimeout = bigdata.sessionTimeout; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/jiniClient.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/jiniClient.config 2013-11-13 17:22:34 UTC (rev 7541) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/jiniClient.config 2013-11-13 17:23:29 UTC (rev 7542) @@ -26,10 +26,6 @@ // runs jini on the localhost using unicast locators. new LookupLocator("jini://localhost/") - - // runs jini on one or more hosts using unicast locators. - //new LookupLocator("jini://"+jini1), - //new LookupLocator("jini://"+jini2), }; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2013-11-13 17:22:41
|
Revision: 7541 http://bigdata.svn.sourceforge.net/bigdata/?rev=7541&view=rev Author: martyncutcher Date: 2013-11-13 17:22:34 +0000 (Wed, 13 Nov 2013) Log Message: ----------- Ensures that RWStore reset() synchronizes the metaTransientBits with the meteBits to protect committed allocations. Without this fix an HA service could become Leader with incorrect metabits protection, leading to problems seen in #738 where the HA followers were unable to correctly compute the delta on modified Allocators, leading to address resolution exceptions. The synchronization action has also been changed from a clone() to an arraycopy() to lessen heap pressure. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-11-13 17:22:17 UTC (rev 7540) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-11-13 17:22:34 UTC (rev 7541) @@ -1451,7 +1451,9 @@ for (int i = 0; i < m_metaBitsSize; i++) { m_metaBits[i] = strBuf.readInt(); } - m_metaTransientBits = (int[]) m_metaBits.clone(); + // m_metaTransientBits = (int[]) m_metaBits.clone(); + + syncMetaTransients(); final int numFixed = m_allocSizes.length; @@ -1478,6 +1480,20 @@ + ", " + m_metaBitsAddr); } } + + /** + * Uses System.arraycopy rather than clone() to duplicate the + * metaBits to the metaTransientBits, which will be faster. 
+ */ + private void syncMetaTransients() { + if (m_metaTransientBits == null) { + m_metaTransientBits = (int[]) m_metaBits.clone(); + } else { + assert m_metaTransientBits.length == m_metaBits.length; + + System.arraycopy(m_metaBits, 0, m_metaTransientBits, 0, m_metaTransientBits.length); + } + } // /* // * Called when store is opened to make sure any deferred frees are @@ -2842,6 +2858,11 @@ isolatedWrites = isolatedWrites || fa.reset(m_writeCacheService, m_committedNextAllocation); } + /** + * Now clone the transient metabits for protection if this service becomes leader + */ + syncMetaTransients(); + if (!isolatedWrites) { /** * Now we should be able to unwind any unused allocators and unused @@ -3114,7 +3135,7 @@ // to provide control // writeFileSpec(); - m_metaTransientBits = (int[]) m_metaBits.clone(); + syncMetaTransients(); // Must be called from AbstractJournal commitNow after writeRootBlock // postCommit(); @@ -3500,6 +3521,9 @@ (b * cDefaultMetaBitsSize) + 1, cDefaultMetaBitsSize-1); if (ret != -1) { + // The assumption is that this bit is also NOT set in m_metaBits + assert !tstBit(m_metaBits, ret); + return ret; } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 17:22:23
|
Revision: 7540 http://bigdata.svn.sourceforge.net/bigdata/?rev=7540&view=rev Author: thompsonbry Date: 2013-11-13 17:22:17 +0000 (Wed, 13 Nov 2013) Log Message: ----------- Missed this file on the IIndexManagerCallable refactor. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2013-11-13 17:20:04 UTC (rev 7539) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2013-11-13 17:22:17 UTC (rev 7540) @@ -1148,19 +1148,6 @@ } @Override - public <T> Future<T> submit(final IIndexManagerCallable<T> callable, - final boolean asyncFuture) throws IOException { - - callable.setIndexManager(getIndexManager()); - - final Future<T> ft = getIndexManager().getExecutorService().submit( - callable); - - return getProxy(ft, asyncFuture); - - } - - @Override public RunStateEnum getRunStateEnum() { @SuppressWarnings("unchecked") This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 17:20:10
|
Revision: 7539 http://bigdata.svn.sourceforge.net/bigdata/?rev=7539&view=rev Author: thompsonbry Date: 2013-11-13 17:20:04 +0000 (Wed, 13 Nov 2013) Log Message: ----------- javadoc Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java 2013-11-13 17:19:47 UTC (rev 7538) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java 2013-11-13 17:20:04 UTC (rev 7539) @@ -905,9 +905,9 @@ * with a binding in the source {@link IBindingSet}. * * @param left - * The left binding set. + * The left binding set (target). * @param right - * The right binding set. + * The right binding set (source). * @param constraints * An array of constraints (optional). When given, destination * {@link IBindingSet} will be validated <em>after</em> mutation. @@ -918,7 +918,8 @@ * * @return The solution with the combined bindings and <code>null</code> if * the bindings were not consistent, if a constraint was violated, - * etc. + * etc. Note that either <code>left</code> or <code>right</code> MAY + * be returned if the other solution set is empty (optimization). */ @SuppressWarnings({ "rawtypes", "unchecked" }) static public IBindingSet bind(// This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 17:19:53
|
Revision: 7538 http://bigdata.svn.sourceforge.net/bigdata/?rev=7538&view=rev Author: thompsonbry Date: 2013-11-13 17:19:47 +0000 (Wed, 13 Nov 2013) Log Message: ----------- @Override annotations. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinUtility.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinUtility.java 2013-11-13 17:19:00 UTC (rev 7537) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinUtility.java 2013-11-13 17:19:47 UTC (rev 7538) @@ -177,6 +177,7 @@ * Human readable representation of the {@link IHashJoinUtility} metadata * (but not the solutions themselves). */ + @Override public String toString() { final StringBuilder sb = new StringBuilder(); @@ -336,22 +337,27 @@ } + @Override public JoinTypeEnum getJoinType() { return joinType; } + @Override public IVariable<?> getAskVar() { return askVar; } + @Override public IVariable<?>[] getJoinVars() { return joinVars; } + @Override public IVariable<?>[] getSelectVars() { return selectVars; } + @Override public IConstraint[] getConstraints() { return constraints; } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 17:19:07
|
Revision: 7537 http://bigdata.svn.sourceforge.net/bigdata/?rev=7537&view=rev Author: thompsonbry Date: 2013-11-13 17:19:00 +0000 (Wed, 13 Nov 2013) Log Message: ----------- Refactored IIndexManagerCallable out of the HAGlue interface. Some changes related to DumpJournal and TestDumpJournal for an as-yet unresolved issue with the inability to run DumpJournal with a concurrent mutation on the Journal. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/HAGlue.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/IndexManagerCallable.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/IIndexManagerCallable.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/HAGlue.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/HAGlue.java 2013-11-13 17:14:29 UTC (rev 7536) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/HAGlue.java 2013-11-13 17:19:00 UTC (rev 7537) @@ -25,11 +25,9 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.io.Serializable; import java.rmi.Remote; import java.security.DigestException; import java.security.NoSuchAlgorithmException; -import java.util.concurrent.Callable; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -48,7 +46,6 @@ import com.bigdata.ha.msg.IHASnapshotRequest; import com.bigdata.ha.msg.IHASnapshotResponse; import com.bigdata.journal.AbstractJournal; -import com.bigdata.journal.IIndexManager; import com.bigdata.journal.jini.ha.HAJournalServer; import 
com.bigdata.quorum.AsynchronousQuorumCloseException; import com.bigdata.quorum.QuorumException; @@ -299,7 +296,6 @@ Future<Void> rebuildFromLeader(IHARemoteRebuildRequest req) throws IOException; - /** * Run the caller's task on the service. * @@ -314,35 +310,5 @@ */ public <T> Future<T> submit(IIndexManagerCallable<T> callable, boolean asyncFuture) throws IOException; - - - public interface IIndexManagerCallable<T> extends Serializable, Callable<T> { - - /** - * Invoked before the task is executed to provide a reference to the - * {@link IIndexManager} on which it is executing. - * - * @param indexManager - * The index manager on the service. - * - * @throws IllegalArgumentException - * if the argument is <code>null</code> - * @throws IllegalStateException - * if {@link #setIndexManager(IIndexManager)} has already been - * invoked and was set with a different value. - */ - void setIndexManager(IIndexManager indexManager); - - /** - * Return the {@link IIndexManager}. - * - * @return The data service and never <code>null</code>. - * - * @throws IllegalStateException - * if {@link #setIndexManager(IIndexManager)} has not been invoked. - */ - IIndexManager getIndexManager(); - - } } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/IIndexManagerCallable.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/IIndexManagerCallable.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/IIndexManagerCallable.java 2013-11-13 17:19:00 UTC (rev 7537) @@ -0,0 +1,66 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha; + +import java.io.Serializable; +import java.util.concurrent.Callable; + +import com.bigdata.journal.IIndexManager; + +/** + * Interface allows arbitrary tasks to be submitted to an {@link HAGlue} service + * for evaluation. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @param <T> + */ +public interface IIndexManagerCallable<T> extends Serializable, Callable<T> { + + /** + * Invoked before the task is executed to provide a reference to the + * {@link IIndexManager} on which it is executing. + * + * @param indexManager + * The index manager on the service. + * + * @throws IllegalArgumentException + * if the argument is <code>null</code> + * @throws IllegalStateException + * if {@link #setIndexManager(IIndexManager)} has already been + * invoked and was set with a different value. + */ + void setIndexManager(IIndexManager indexManager); + + /** + * Return the {@link IIndexManager}. + * + * @return The data service and never <code>null</code>. + * + * @throws IllegalStateException + * if {@link #setIndexManager(IIndexManager)} has not been + * invoked. 
+ */ + IIndexManager getIndexManager(); + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/IndexManagerCallable.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/IndexManagerCallable.java 2013-11-13 17:14:29 UTC (rev 7536) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/IndexManagerCallable.java 2013-11-13 17:19:00 UTC (rev 7537) @@ -2,7 +2,6 @@ import org.apache.log4j.Logger; -import com.bigdata.ha.HAGlue.IIndexManagerCallable; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.jini.ha.HAJournal; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-11-13 17:14:29 UTC (rev 7536) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-11-13 17:19:00 UTC (rev 7537) @@ -100,6 +100,7 @@ import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAStatusEnum; import com.bigdata.ha.HATXSGlue; +import com.bigdata.ha.IIndexManagerCallable; import com.bigdata.ha.IJoinedAndNonJoinedServices; import com.bigdata.ha.JoinedAndNonJoinedServices; import com.bigdata.ha.PrepareRequest; @@ -3106,8 +3107,8 @@ */ private final long commitToken; - /** The #of bytes on the journal as of the previous commit point. */ - private final long byteCountBefore; +// /** The #of bytes on the journal as of the previous commit point. */ +// private final long byteCountBefore; /** * The commit counter that will be assigned to the new commit point. @@ -3150,8 +3151,8 @@ this.old = store._rootBlock; - // #of bytes on the journal as of the previous commit point. - this.byteCountBefore = store._rootBlock.getNextOffset(); +// // #of bytes on the journal as of the previous commit point. 
+// this.byteCountBefore = store._rootBlock.getNextOffset(); this.newCommitCounter = old.getCommitCounter() + 1; @@ -5857,12 +5858,14 @@ /* * We also need to discard any active read/write tx since there - * is no longer a quorum and a read/write tx was running on the + * is no longer a quorum. This will hit both read-only + * transactions running on any service (not necessarily the + * leader) and read/write transactions if this service was the * old leader. * - * We do not need to discard read-only tx since the committed - * state should remain valid even when a quorum is lost. - * However, it would be a bit odd to leave read-only + * Note: We do not need to discard read-only tx since the + * committed state should remain valid even when a quorum is + * lost. However, it would be a bit odd to leave read-only * transactions running if you could not start a new read-only * because the quorum is not met. */ @@ -5874,7 +5877,17 @@ * * FIXME HA : Abort the unisolated connection? (esp for group * commit and the NSS level SPARQL and REST API unisolated - * operations). + * operations). Maybe we can wrap the execute of the UpdateTask + * and the execution of the REST Mutation API methods in a + * well-known ThreadGuard and then do interruptAll() to force + * the cancelation of any running task? We could also wrap any + * IIndexManagerCallable in HAGlue.submit() with a FutureTask + * implementation that uses the appropriate ThreadGuard to + * ensure that any unisolated tasks are cancelled (that is + * actually overkill since it would not differentiate TX based + * operations from unisolated operations - we could also use + * that ThreadGuard in AbstractTask). Add unit tests for both + * UPDATE and other REST mutation methods. 
* * @see <a * href="https://sourceforge.net/apps/trac/bigdata/ticket/753" Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java 2013-11-13 17:14:29 UTC (rev 7536) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java 2013-11-13 17:19:00 UTC (rev 7537) @@ -381,6 +381,18 @@ final boolean dumpHistory, final boolean dumpPages, final boolean dumpIndices, final boolean showTuples) { +// Note: This does not fix the issue. +// /** +// * Start a transaction. This will bracket all index access and protect +// * the data on the journal from concurrent recycling. +// * +// * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/762"> +// * DumpJournal does not protect against concurrent updates (NSS) +// * </a> +// */ +// final long tx = journal.newTx(ITx.READ_COMMITTED); +// try { +// final FileMetadata fmd = journal.getFileMetadata(); if (fmd != null) { @@ -600,6 +612,9 @@ dumpPages, dumpIndices, showTuples); } +// } finally { +// journal.abort(tx); +// } } @@ -614,7 +629,7 @@ } - public void dumpGlobalRowStore(final PrintWriter out) { + private void dumpGlobalRowStore(final PrintWriter out) { final SparseRowStore grs = journal.getGlobalRowStore(journal .getLastCommitTime()); @@ -826,7 +841,7 @@ * * @return */ - public String dumpRawRecord(final long addr) { + private String dumpRawRecord(final long addr) { if (journal.getBufferStrategy() instanceof IRWStrategy) { /** @@ -984,6 +999,7 @@ } } case Stream: + @SuppressWarnings("unused") final Stream stream = (Stream) ndx; /* * Note: We can't do anything here with a Stream, but we do @@ -1004,41 +1020,4 @@ } - /** - * Return the data in the buffer. 
- */ - public static byte[] getBytes(ByteBuffer buf) { - - if (buf.hasArray() && buf.arrayOffset() == 0 && buf.position() == 0 - && buf.limit() == buf.capacity()) { - - /* - * Return the backing array. - */ - - return buf.array(); - - } - - /* - * Copy the expected data into a byte[] using a read-only view on the - * buffer so that we do not mess with its position, mark, or limit. - */ - final byte[] a; - { - - buf = buf.asReadOnlyBuffer(); - - final int len = buf.remaining(); - - a = new byte[len]; - - buf.get(a); - - } - - return a; - - } - } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java 2013-11-13 17:14:29 UTC (rev 7536) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java 2013-11-13 17:19:00 UTC (rev 7537) @@ -29,15 +29,23 @@ package com.bigdata.journal; import java.io.IOException; +import java.util.LinkedList; +import java.util.List; import java.util.UUID; +import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; +import java.util.concurrent.FutureTask; +import java.util.concurrent.TimeUnit; import com.bigdata.btree.AbstractBTreeTestCase; import com.bigdata.btree.BTree; import com.bigdata.btree.HTreeIndexMetadata; import com.bigdata.btree.IndexMetadata; import com.bigdata.btree.keys.KV; +import com.bigdata.concurrent.FutureTaskMon; import com.bigdata.htree.HTree; +import com.bigdata.rwstore.IRWStrategy; +import com.bigdata.util.concurrent.LatchedExecutor; /** * Test suite for {@link DumpJournal}. @@ -66,8 +74,10 @@ /** * @param name */ - public TestDumpJournal(String name) { + public TestDumpJournal(final String name) { + super(name); + } /** @@ -361,4 +371,254 @@ } + /** + * Unit test for {@link DumpJournal} with concurrent updates against the + * backing store. 
This is intended primarily to detect failures to protect + * against the recycling model associated with the {@link IRWStrategy}. + * + * @throws Exception + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/762"> + * DumpJournal does not protect against concurrent updates (NSS) </a> + */ + public void test_dumpJournal_concurrent_updates() throws Exception { + + final String PREFIX = "testIndex#"; + final int NUM_INDICES = 4; + + Journal src = getStore(getProperties()); + + try { + + for (int i = 0; i < NUM_INDICES; i++) { + + // register an index + final String name = PREFIX + i; + + src.registerIndex(new IndexMetadata(name, UUID.randomUUID())); + { + + // lookup the index. + final BTree ndx = src.getIndex(name); + + // #of tuples to write. + final int ntuples = r.nextInt(1000); + + // generate random data. + final KV[] a = AbstractBTreeTestCase + .getRandomKeyValues(ntuples); + + // write tuples (in random order) + for (KV kv : a) { + + ndx.insert(kv.key, kv.val); + + if (r.nextInt(100) < 10) { + + // randomly increment the counter (10% of the time). + ndx.getCounter().incrementAndGet(); + + } + + } + + } + + } + + // commit the journal (!) + src.commit(); + + /** + * Task to run various DumpJournal requests. 
+ */ + final class DumpTask implements Callable<Void> { + + private final Journal src; + + public DumpTask(final Journal src) { + + this.src = src; + + } + @Override + public Void call() throws Exception { + + new DumpJournal(src) + .dumpJournal(false/* dumpHistory */, + true/* dumpPages */, + false/* dumpIndices */, false/* showTuples */); + + new DumpJournal(src) + .dumpJournal(true/* dumpHistory */, + true/* dumpPages */, true/* dumpIndices */, + false/* showTuples */); + + // test again w/o dumpPages + new DumpJournal(src) + .dumpJournal(true/* dumpHistory */, + false/* dumpPages */, + true/* dumpIndices */, false/* showTuples */); + + return (Void) null; + + } + + } + + final class UpdateTask implements Callable<Void> { + + private final Journal src; + + public UpdateTask(final Journal src) { + + this.src = src; + + } + @Override + public Void call() throws Exception { + + /* + * Now write some more data, going through a series of commit + * points. This let's us check access to historical commit points. + */ + for (int j = 0; j < 10; j++) { + + for (int i = 0; i < NUM_INDICES; i++) { + + // register an index + final String name = PREFIX + i; + + // lookup the index. + final BTree ndx = src.getIndex(name); + + // #of tuples to write. + final int ntuples = r.nextInt(1000); + + // generate random data. + final KV[] a = AbstractBTreeTestCase + .getRandomKeyValues(ntuples); + + // write tuples (in random order) + for (KV kv : a) { + + ndx.insert(kv.key, kv.val); + + if (r.nextInt(100) < 10) { + + // randomly increment the counter (10% of the time). 
+ ndx.getCounter().incrementAndGet(); + + } + + } + + } + + log.info("Will commit"); + src.commit(); + log.info("Did commit"); + + } + + return (Void) null; + } + } + + final List<FutureTask<Void>> tasks1 = new LinkedList<FutureTask<Void>>(); + final List<FutureTask<Void>> tasks2 = new LinkedList<FutureTask<Void>>(); + final List<FutureTask<Void>> tasksAll = new LinkedList<FutureTask<Void>>(); + + // Setup executor that limits parallelism. + final LatchedExecutor executor1 = new LatchedExecutor( + src.getExecutorService(), 1/* nparallel */); + + // Setup executor that limits parallelism. + final LatchedExecutor executor2 = new LatchedExecutor( + src.getExecutorService(), 1/* nparallel */); + + try { + + // Tasks to run. + tasks1.add(new FutureTaskMon<Void>(new DumpTask(src))); + tasks1.add(new FutureTaskMon<Void>(new DumpTask(src))); + tasks1.add(new FutureTaskMon<Void>(new DumpTask(src))); + + tasks2.add(new FutureTaskMon<Void>(new UpdateTask(src))); + tasks2.add(new FutureTaskMon<Void>(new UpdateTask(src))); + tasks2.add(new FutureTaskMon<Void>(new UpdateTask(src))); + + // Schedule the tasks. + for (FutureTask<Void> ft : tasks1) + executor1.execute(ft); + for (FutureTask<Void> ft : tasks2) + executor2.execute(ft); + + log.info("Blocking for futures"); + + // Wait for tasks. + tasksAll.addAll(tasks1); + tasksAll.addAll(tasks2); + int ndone = 0; + for (FutureTask<Void> ft : tasksAll) { + + /* + * Check Future. + * + * Note: sanity check for test termination w/ timeout. + */ + + try { + ft.get(2, TimeUnit.MINUTES); + } catch (ExecutionException ex) { + log.error("ndone=" + ndone, ex); + throw ex; + } + + log.info("ndone=" + ndone); + ndone++; + } + + } finally { + + // Ensure tasks are terminated. + for (FutureTask<Void> ft : tasksAll) { + + ft.cancel(true/*mayInterruptIfRunning*/); + + } + + } + + if (src.isStable()) { + + src = reopenStore(src); + + // Try running the DumpTask again. 
+ new DumpTask(src).call(); + + } + + } finally { + + src.destroy(); + + } + + } + /** Stress test to look for different failure modes. */ + public void _testStress_dumpJournal_concurrent_updates() throws Exception { + final int LIMIT = 20; + for (int i = 0; i < LIMIT; i++) { + if (i > 1) + setUp(); + try { + test_dumpJournal_concurrent_updates(); + } catch (Exception ex) { + log.fatal("FAILURE: i=" + i + ", cause=" + ex); + } + if (i + 1 < LIMIT) + tearDown(); + } + } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 17:14:36
|
Revision: 7536 http://bigdata.svn.sourceforge.net/bigdata/?rev=7536&view=rev Author: thompsonbry Date: 2013-11-13 17:14:29 +0000 (Wed, 13 Nov 2013) Log Message: ----------- failed to call super.setUp() in TestCase3 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/TestCase3.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/TestCase3.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/TestCase3.java 2013-11-13 15:34:09 UTC (rev 7535) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/TestCase3.java 2013-11-13 17:14:29 UTC (rev 7536) @@ -73,7 +73,7 @@ @Override protected void setUp() throws Exception { - + super.setUp(); r = new Random(); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-13 15:34:15
|
Revision: 7535 http://bigdata.svn.sourceforge.net/bigdata/?rev=7535&view=rev Author: thompsonbry Date: 2013-11-13 15:34:09 +0000 (Wed, 13 Nov 2013) Log Message: ----------- Modified the test harness to handle what appears to be a relatively common case on ci.bigdata.com where none of the services are running (serverA==serverB==serverC==null) but a proxy for a leader was discovered through a data race where zookeeper and/or the test harnesses local lookup service cache are not yet caught up with the service tear downs. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-11-12 20:28:53 UTC (rev 7534) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-11-13 15:34:09 UTC (rev 7535) @@ -458,21 +458,35 @@ if (quorum.isQuorumMet()) { final long token = quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); - leader = quorum.getClient().getLeader(token); - if (leader.equals(serverA)) { + /* + * Note: It is possible to resolve a proxy for a service that + * has been recently shutdown or destroyed. This is effectively + * a data race. 
+ */ + final HAGlue t = quorum.getClient().getLeader(token); + if (t.equals(serverA)) { + leader = t; leaderServiceDir = getServiceDirA(); leaderListener = serviceListenerA; - } else if (leader.equals(serverB)) { + } else if (t.equals(serverB)) { + leader = t; leaderServiceDir = getServiceDirB(); leaderListener = serviceListenerB; - } else if (leader.equals(serverC)) { + } else if (t.equals(serverC)) { + leader = t; leaderServiceDir = getServiceDirC(); leaderListener = serviceListenerC; - } else {// log warning and fall through. + } else { + if (serverA == null && serverB == null && serverC == null) { + /* + * There are no services running and nothing to shutdown. We + * probably resolved a stale proxy to the leader above. + */ + return; + } throw new IllegalStateException( - "Leader is none of A, B, or C: leader=" + leader - + ", A=" + serverA + ", B=" + serverB + ", C=" - + serverC); + "Leader is none of A, B, or C: leader=" + t + ", A=" + + serverA + ", B=" + serverB + ", C=" + serverC); } } else { leader = null; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-12 20:29:01
|
Revision: 7534 http://bigdata.svn.sourceforge.net/bigdata/?rev=7534&view=rev Author: thompsonbry Date: 2013-11-12 20:28:53 +0000 (Tue, 12 Nov 2013) Log Message: ----------- Increased the timeout when awaiting a specific HALog count on a service. This is to help out on ci.bigdata.com, which has less RAM and a slower CPU. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2013-11-12 17:11:51 UTC (rev 7533) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2013-11-12 20:28:53 UTC (rev 7534) @@ -1195,10 +1195,11 @@ protected void awaitLogCount(final File logDir, final long count) { assertCondition(new Runnable() { + @Override public void run() { assertLogCount(logDir, count); } - }, 5000, TimeUnit.MILLISECONDS); + }, 10000, TimeUnit.MILLISECONDS); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-12 17:11:57
|
Revision: 7533 http://bigdata.svn.sourceforge.net/bigdata/?rev=7533&view=rev Author: thompsonbry Date: 2013-11-12 17:11:51 +0000 (Tue, 12 Nov 2013) Log Message: ----------- Addding an explicit reference to a MAVEN_HOME variable so we can find mvn on ci.bigdata.com. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-12 16:43:34 UTC (rev 7532) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-12 17:11:51 UTC (rev 7533) @@ -333,7 +333,7 @@ <!-- Deploy the JAR to the maven repository. --> <target name="maven-deploy" depends="jar" description="Deploy the jar to the maven repository."> - <exec command="mvn"> + <exec command="${MAVEN_HOME}/bin/mvn"> <arg value="clean"/> <arg value="deploy"/> </exec> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-12 16:43:40
|
Revision: 7532 http://bigdata.svn.sourceforge.net/bigdata/?rev=7532&view=rev Author: thompsonbry Date: 2013-11-12 16:43:34 +0000 (Tue, 12 Nov 2013) Log Message: ----------- updated pom to correct the inline comments for publishing dependencies to the bigdata maven repository and provided a link to the release guide on the bigdata wiki, which now has a section on maven. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/pom.xml Modified: branches/BIGDATA_RELEASE_1_3_0/pom.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/pom.xml 2013-11-12 16:17:13 UTC (rev 7531) +++ branches/BIGDATA_RELEASE_1_3_0/pom.xml 2013-11-12 16:43:34 UTC (rev 7532) @@ -39,7 +39,11 @@ repository. There are commands in comment blocks in this POM that will deploy those dependencies. Be sure to update the version numbers in the commands first! - + +See https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=ReleaseGuide +for information on publishing maven releases and snapshots for bigdata and its +dependencies. 
+ --> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" @@ -316,7 +320,7 @@ -Dversion=5.1.5 \ -Dpackaging=jar \ -DrepositoryId=bigdata.releases \ - -Durl=scpexe://shell.speakeasy.net/home/t/thompsonbry/systap.com/maven/releases/ \ + -Durl=scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/releases/ \ -Dfile=bigdata/lib/unimi/fastutil-5.1.5.jar --> <groupId>it.unimi</groupId> @@ -332,7 +336,7 @@ -Dversion=4.8 \ -Dpackaging=jar \ -DrepositoryId=bigdata.releases \ - -Durl=scpexe://shell.speakeasy.net/home/t/thompsonbry/systap.com/maven/releases/ \ + -Durl=scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/releases/ \ -Dfile=bigdata/lib/icu/icu4j-charset-4.8.jar --> <groupId>com.ibm.icu</groupId> @@ -355,7 +359,7 @@ -Dversion=1.0.1 \ -Dpackaging=jar \ -DrepositoryId=bigdata.releases \ - -Durl=scpexe://shell.speakeasy.net/home/t/thompsonbry/systap.com/maven/releases/ \ + -Durl=scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/releases/ \ -Dfile=bigdata/lib/bigdata-ganglia-1.0.1.jar --> <groupId>com.bigdata</groupId> @@ -370,7 +374,7 @@ -Dversion=1.0.6-020610 \ -Dpackaging=jar \ -DrepositoryId=bigdata.releases \ - -Durl=scpexe://shell.speakeasy.net/home/t/thompsonbry/systap.com/maven/releases/ \ + -Durl=scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/releases/ \ -Dfile=bigdata/lib/dsi-utils-1.0.6-020610.jar --> <dependency> @@ -386,7 +390,7 @@ -Dversion=1.0.6-020610 \ -Dpackaging=jar \ -DrepositoryId=bigdata.releases \ - -Durl=scpexe://shell.speakeasy.net/home/t/thompsonbry/systap.com/maven/releases/ \ + -Durl=scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/releases/ \ -Dfile=bigdata/lib/lgpl-utils-1.0.6-020610.jar --> <groupId>com.bigdata</groupId> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-12 16:17:21
|
Revision: 7531 http://bigdata.svn.sourceforge.net/bigdata/?rev=7531&view=rev Author: thompsonbry Date: 2013-11-12 16:17:13 +0000 (Tue, 12 Nov 2013) Log Message: ----------- Changed the pom to indicate the new path on www.systap.com to the maven repository. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/pom.xml Modified: branches/BIGDATA_RELEASE_1_3_0/pom.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/pom.xml 2013-11-11 21:48:16 UTC (rev 7530) +++ branches/BIGDATA_RELEASE_1_3_0/pom.xml 2013-11-12 16:17:13 UTC (rev 7531) @@ -154,12 +154,12 @@ <repository> <id>bigdata.releases</id> <name>bigdata(R) releases</name> - <url>scpexe://shell.speakeasy.net/home/t/thompsonbry/systap.com/maven/releases</url> + <url>scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/releases</url> </repository> <snapshotRepository> <id>bigdata.snapshots</id> <name>bigdata(R) snapshots</name> - <url>scpexe://shell.speakeasy.net/home/t/thompsonbry/systap.com/maven/snapshots</url> + <url>scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/snapshots</url> <uniqueVersion>true</uniqueVersion> </snapshotRepository> </distributionManagement> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-11 21:48:22
|
Revision: 7530 http://bigdata.svn.sourceforge.net/bigdata/?rev=7530&view=rev Author: thompsonbry Date: 2013-11-11 21:48:16 +0000 (Mon, 11 Nov 2013) Log Message: ----------- doABCMultiLoadFollowerReads2() was failing to cancel each of the submitted futures. It looks like it was simply not refactored correctly when creating it from another test. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-11-11 18:32:53 UTC (rev 7529) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-11-11 21:48:16 UTC (rev 7530) @@ -2382,151 +2382,117 @@ } /** - * Similar to multitransaction but rather than a number of updates following a load it is simply a number of loads - * followed by queries on the folowers that are checkd for consistency. + * Similar to multitransaction but rather than a number of updates following + * a load it is simply a number of loads followed by queries on the folowers + * that are checked for consistency. * * @param loads - * @param transactionDelay - * @throws Exception + * The number of LOAD operations to perform. + * @param largeLoad + * If true, the load a large file. */ protected void doABCMultiLoadFollowerReads2(final int nTransactions, final boolean largeLoad) throws Exception { -// try { + // Start all services. + final ABC services = new ABC(true/* sequential */); - // Start all services. - final ABC services = new ABC(true/* sequential */); + // Wait for a quorum meet. + final long token = quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); - // Wait for a quorum meet. 
- final long token = quorum.awaitQuorum(awaitQuorumTimeout, - TimeUnit.MILLISECONDS); + assertEquals(token, awaitFullyMetQuorum()); - assertEquals(token, awaitFullyMetQuorum()); + final HAGlue leader = quorum.getClient().getLeader(token); - final HAGlue leader = quorum.getClient().getLeader(token); + // Verify assumption in this test. + assertEquals(leader, services.serverA); - // Verify assumption in this test. - assertEquals(leader, services.serverA); + // Wait until all services are "HA" ready. + leader.awaitHAReady(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + services.serverB + .awaitHAReady(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + services.serverC + .awaitHAReady(awaitQuorumTimeout, TimeUnit.MILLISECONDS); - // Wait until all services are "HA" ready. - leader.awaitHAReady(awaitQuorumTimeout, TimeUnit.MILLISECONDS); - services.serverB.awaitHAReady(awaitQuorumTimeout, TimeUnit.MILLISECONDS); - services.serverC.awaitHAReady(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + /* + * Now create a Callable for the final followes to repeatedly query + * against the then current commit point. The task returns the #of + * queries that were executed. The task will run until we stop issuing + * UPDATE requests. + */ + class QueryTask implements Callable<Long> { - /* - * Now create a Callable for the final followes to repeatedly query - * against the then current commit point. The task returns the #of - * queries that were executed. The task will run until we stop - * issuing UPDATE requests. - */ - class QueryTask implements Callable<Long> { - - /** The service to query. */ - private final HAGlue haGlue; - -// /** -// * The SPARQL end point for that service. -// */ -// final RemoteRepository remoteRepo; -// -// /** -// * Format for timestamps that may be used to correlate with the -// * HA log messages. -// */ -// final SimpleDateFormat df = new SimpleDateFormat("hh:mm:ss,SSS"); + /** The service to query. 
*/ + private final HAGlue haGlue; - /** - * @param haGlue - * The service to query. - * - * @throws IOException - */ - public QueryTask(final HAGlue haGlue) throws IOException { - -// this.haGlue = haGlue; - - /* - * Run query against one of the followers. - * - * 6537 queries for 2000 transactions (leader) - * - * 10109 queries for 2000 transactions (follower) - */ -// remoteRepo = getRemoteRepository(haGlue); - this.haGlue = haGlue; + public QueryTask(final HAGlue haGlue) throws IOException { - } + this.haGlue = haGlue; - public Long call() throws Exception { - - return getCountStar(haGlue); - -// final String query = "SELECT (COUNT(*) AS ?count) WHERE { ?s ?p ?o }"; -// -// // Verify quorum is still valid. -// quorum.assertQuorum(token); -// -// // Run query. -// final TupleQueryResult result = remoteRepo -// .prepareTupleQuery(query).evaluate(); -// -// final BindingSet bs = result.next(); -// -// // done. -// final Value v = bs.getBinding("count").getValue(); -// -// return (long) ((org.openrdf.model.Literal) v).intValue(); - } - - }; + } - final FutureTask<Long> queryTaskFuture = new FutureTask<Long>( - new QueryTask(services.serverC)); + @Override + public Long call() throws Exception { - /* - * Sequentially run repeated loads and after each load submit queries on all services, - * checking for consistency. - */ + return getCountStar(haGlue); + } + + } // class QueryTask + + /* + * Sequentially run repeated loads and after each load submit queries on + * all services, checking for consistency. + */ + + for (int t = 0; t < nTransactions; t++) { + + // Create tasks, but do not execute yet. 
+ final FutureTask<Void> loadTaskFuture = new FutureTask<Void>( + new LargeLoadTask(token, largeLoad/* reallyLargeLoad */)); + final FutureTask<Long> qAFuture = new FutureTask<Long>( + new QueryTask(services.serverA)); + final FutureTask<Long> qBFuture = new FutureTask<Long>( + new QueryTask(services.serverB)); + final FutureTask<Long> qCFuture = new FutureTask<Long>( + new QueryTask(services.serverC)); + try { - for (int t = 0 ; t < nTransactions; t++) { - final FutureTask<Void> loadTaskFuture = new FutureTask<Void>(new LargeLoadTask(token, largeLoad/* reallyLargeLoad */)); - executorService.submit(loadTaskFuture); - loadTaskFuture.get(); // wait on load! - final FutureTask<Long> qAFuture = new FutureTask<Long>(new QueryTask(services.serverA)); - final FutureTask<Long> qBFuture = new FutureTask<Long>(new QueryTask(services.serverB)); - final FutureTask<Long> qCFuture = new FutureTask<Long>(new QueryTask(services.serverC)); - - executorService.submit(qAFuture); - executorService.submit(qBFuture); - executorService.submit(qCFuture); - - if (log.isInfoEnabled()) - log.info("StatementsA: " + qAFuture.get() - + ", StatementsB: " + qBFuture.get() - + ", StatementsC: " + qCFuture.get() - ); - - assertTrue(qAFuture.get().equals(qBFuture.get())); - assertTrue(qAFuture.get().equals(qCFuture.get())); - } + // Execute LOAD. + executorService.submit(loadTaskFuture); + loadTaskFuture.get(); // wait on load! + // Execute query tasks. + executorService.submit(qAFuture); + executorService.submit(qBFuture); + executorService.submit(qCFuture); + + if (log.isInfoEnabled()) + log.info("StatementsA: " + qAFuture.get() + + ", StatementsB: " + qBFuture.get() + + ", StatementsC: " + qCFuture.get()); + + assertEquals(qAFuture.get(), qBFuture.get()); + assertEquals(qAFuture.get(), qCFuture.get()); + } finally { - - queryTaskFuture.cancel(true/* mayInterruptIfRunning */); + // Ensure all tasks are cancelled. 
+ loadTaskFuture.cancel(true/* mayInterruptIfRunning */); + qAFuture.cancel(true/* mayInterruptIfRunning */); + qBFuture.cancel(true/* mayInterruptIfRunning */); + qCFuture.cancel(true/* mayInterruptIfRunning */); + } - // Finally cehck for binary compatibility - assertDigestsEquals(new HAGlue[] { services.serverA, services.serverB, services.serverC }); + } -// } finally { -// -// destroyAll(); -// -// } - + // Finally check for binary compatibility + assertDigestsEquals(new HAGlue[] { services.serverA, services.serverB, + services.serverC }); + } /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-11 18:33:00
|
Revision: 7529 http://bigdata.svn.sourceforge.net/bigdata/?rev=7529&view=rev Author: thompsonbry Date: 2013-11-11 18:32:53 +0000 (Mon, 11 Nov 2013) Log Message: ----------- Modified destroyAll() to provide more information if leader is none of A, B, or C. Removed explicit destroyAll() call from testABCMultiLoadFollowerReadsLargeLoad(). Not required. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-11-11 17:31:44 UTC (rev 7528) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-11-11 18:32:53 UTC (rev 7529) @@ -468,8 +468,11 @@ } else if (leader.equals(serverC)) { leaderServiceDir = getServiceDirC(); leaderListener = serviceListenerC; - } else { - throw new IllegalStateException(); + } else {// log warning and fall through. 
+ throw new IllegalStateException( + "Leader is none of A, B, or C: leader=" + leader + + ", A=" + serverA + ", B=" + serverB + ", C=" + + serverC); } } else { leader = null; @@ -531,6 +534,7 @@ final UUID[] services = getServices(members); assertCondition(new Runnable() { + @Override public void run() { try { assertEquals(services, quorum.getPipeline()); @@ -570,6 +574,7 @@ final UUID[] services = getServices(members); assertCondition(new Runnable() { + @Override public void run() { try { assertEquals(services, quorum.getJoined()); @@ -594,6 +599,7 @@ final UUID[] services = getServices(members); assertCondition(new Runnable() { + @Override public void run() { try { assertEquals(services, quorum.getMembers()); @@ -855,15 +861,16 @@ } private void tidyServiceDirectory(final File serviceDir) { - if (serviceDir == null || !serviceDir.exists()) - return; - + if (serviceDir == null || !serviceDir.exists()) + return; + for (File file : serviceDir.listFiles()) { - final String name = file.getName(); - - if (name.endsWith(".jnl") || name.equals("snapshot") || name.equals("HALog")) { - recursiveDelete(file); - } + final String name = file.getName(); + + if (name.endsWith(".jnl") || name.equals("snapshot") + || name.equals("HALog")) { + recursiveDelete(file); + } } } @@ -928,48 +935,50 @@ } protected void destroyA() { - safeDestroy(serverA, getServiceDirA(), serviceListenerA); - serverA = null; - serviceListenerA = null; + safeDestroy(serverA, getServiceDirA(), serviceListenerA); + serverA = null; + serviceListenerA = null; } protected void destroyB() { - safeDestroy(serverB, getServiceDirB(), serviceListenerB); - serverB = null; - serviceListenerB = null; + safeDestroy(serverB, getServiceDirB(), serviceListenerB); + serverB = null; + serviceListenerB = null; } protected void destroyC() { - safeDestroy(serverC, getServiceDirC(), serviceListenerC); - serverC = null; - serviceListenerC = null; + safeDestroy(serverC, getServiceDirC(), serviceListenerC); + serverC = null; + 
serviceListenerC = null; } protected void shutdownA() throws IOException { - safeShutdown(serverA, getServiceDirA(), serviceListenerA, true); - - serverA = null; - serviceListenerA = null; + safeShutdown(serverA, getServiceDirA(), serviceListenerA, true); + + serverA = null; + serviceListenerA = null; } - + protected void shutdownB() throws IOException { - safeShutdown(serverB, getServiceDirB(), serviceListenerB, true); - - serverB = null; - serviceListenerB = null; + safeShutdown(serverB, getServiceDirB(), serviceListenerB, true); + + serverB = null; + serviceListenerB = null; } - + protected void shutdownC() throws IOException { - safeShutdown(serverC, getServiceDirC(), serviceListenerC, true); - - serverC = null; - serviceListenerC = null; + safeShutdown(serverC, getServiceDirC(), serviceListenerC, true); + + serverC = null; + serviceListenerC = null; } - + protected void kill(final HAGlue service) throws IOException { + final int pid = ((HAGlueTest) service).getPID(); - + trySignal(SignalEnum.KILL, pid); + } /** @@ -977,19 +986,20 @@ * necessarily something we should rely on */ protected void shutdown(final HAGlue service) throws IOException { - if (service == null) { - throw new IllegalArgumentException(); - } - - if (service.equals(serverA)) { - shutdownA(); - } else if (service.equals(serverB)) { - shutdownB(); - } else if (service.equals(serverC)) { - shutdownC(); - } else { - throw new IllegalArgumentException("Unable to match service: " + service + " possible problem with equals() on Proxy"); - } + if (service == null) { + throw new IllegalArgumentException(); + } + + if (service.equals(serverA)) { + shutdownA(); + } else if (service.equals(serverB)) { + shutdownB(); + } else if (service.equals(serverC)) { + shutdownC(); + } else { + throw new IllegalArgumentException("Unable to match service: " + + service + " possible problem with equals() on Proxy"); + } } protected void shutdownLeader() throws AsynchronousQuorumCloseException, @@ -1028,6 +1038,7 @@ } 
+ @Override public Void call() { safeShutdown(haGlue, serviceDir, serviceListener, now); @@ -1133,6 +1144,7 @@ final File serviceDir, final ServiceListener serviceListener) { assertCondition(new Runnable() { + @Override public void run() { try { haGlue.getRunState(); @@ -1145,6 +1157,7 @@ }); assertCondition(new Runnable() { + @Override public void run() { // try to discover the service item. @@ -1164,6 +1177,7 @@ try { assertCondition(new Runnable() { + @Override public void run() { // Wait for the process death. assertTrue(serviceListener.isDead()); @@ -1517,6 +1531,7 @@ } + @Override public HAGlue call() throws Exception { if (restart) { @@ -1542,6 +1557,7 @@ } + @Override public HAGlue call() throws Exception { if (restart) { @@ -1567,6 +1583,7 @@ } + @Override public HAGlue call() throws Exception { if (restart) { @@ -1621,15 +1638,15 @@ } protected UUID getServiceAId() { - return serverAId; + return serverAId; } - + protected UUID getServiceBId() { - return serverBId; + return serverBId; } - + protected UUID getServiceCId() { - return serverCId; + return serverCId; } private HAGlue startServer(final String name, Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-11-11 17:31:44 UTC (rev 7528) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-11-11 18:32:53 UTC (rev 7529) @@ -2360,19 +2360,25 @@ } /** - * Similar to multitransaction but rather than a number of updates following a load it is simply a number of loads - * followed by queries on the folowers that are checkd for consistency. 
+ * Similar to multitransaction but rather than a number of updates following + * a load it is simply a number of loads followed by queries on the folowers + * that are checkd for consistency. */ public void testABCMultiLoadFollowerReads() throws Exception { - doABCMultiLoadFollowerReads2(50/*nTransactions*/, false/*largeLoad*/); + + doABCMultiLoadFollowerReads2(50/* nTransactions */, false/* largeLoad */); + } - + /** - * Similar to multitransaction but rather than a number of updates following a load it is simply a number of loads - * followed by queries on the folowers that are checkd for consistency. + * Similar to multitransaction but rather than a number of updates following + * a load it is simply a number of loads followed by queries on the folowers + * that are checkd for consistency. */ public void testABCMultiLoadFollowerReadsLargeLoad() throws Exception { - doABCMultiLoadFollowerReads2(20/*nTransactions*/, true/*largeLoad*/); + + doABCMultiLoadFollowerReads2(20/* nTransactions */, true/* largeLoad */); + } /** @@ -2386,7 +2392,7 @@ protected void doABCMultiLoadFollowerReads2(final int nTransactions, final boolean largeLoad) throws Exception { - try { +// try { // Start all services. final ABC services = new ABC(true/* sequential */); @@ -2515,11 +2521,11 @@ // Finally cehck for binary compatibility assertDigestsEquals(new HAGlue[] { services.serverA, services.serverB, services.serverC }); - } finally { - - destroyAll(); - - } +// } finally { +// +// destroyAll(); +// +// } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-11 17:31:50
|
Revision: 7528 http://bigdata.svn.sourceforge.net/bigdata/?rev=7528&view=rev Author: thompsonbry Date: 2013-11-11 17:31:44 +0000 (Mon, 11 Nov 2013) Log Message: ----------- correctly raised the logger to FATAL to ignore ERROR messages for ill-formed literals. log4j.logger.com.bigdata.rdf.internal.LexiconConfiguration=FATAL Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties 2013-11-11 17:27:34 UTC (rev 7527) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties 2013-11-11 17:31:44 UTC (rev 7528) @@ -14,7 +14,7 @@ log4j.logger.com.bigdata.io.writecache=INFO #log4j.logger.com.bigdata.zookeeper=INFO #log4j.logger.com.bigdata.zookeeper.ZooHelper=ALL -com.bigdata.rdf.internal.LexiconConfiguration=FATAL +log4j.logger.com.bigdata.rdf.internal.LexiconConfiguration=FATAL log4j.appender.haLog=org.apache.log4j.FileAppender log4j.appender.haLog.Threshold=ALL Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties 2013-11-11 17:27:34 UTC (rev 7527) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties 2013-11-11 17:31:44 UTC 
(rev 7528) @@ -14,7 +14,7 @@ #log4j.logger.com.bigdata.io.writecache=INFO #log4j.logger.com.bigdata.zookeeper=INFO #log4j.logger.com.bigdata.zookeeper.ZooHelper=ALL -com.bigdata.rdf.internal.LexiconConfiguration=FATAL +log4j.logger.com.bigdata.rdf.internal.LexiconConfiguration=FATAL log4j.appender.haLog=org.apache.log4j.FileAppender log4j.appender.haLog.Threshold=ALL Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties 2013-11-11 17:27:34 UTC (rev 7527) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties 2013-11-11 17:31:44 UTC (rev 7528) @@ -14,7 +14,7 @@ #log4j.logger.com.bigdata.io.writecache=INFO #log4j.logger.com.bigdata.zookeeper=INFO #log4j.logger.com.bigdata.zookeeper.ZooHelper=ALL -com.bigdata.rdf.internal.LexiconConfiguration=FATAL +log4j.logger.com.bigdata.rdf.internal.LexiconConfiguration=FATAL log4j.appender.haLog=org.apache.log4j.FileAppender log4j.appender.haLog.Threshold=ALL This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-11 17:27:41
|
Revision: 7527 http://bigdata.svn.sourceforge.net/bigdata/?rev=7527&view=rev Author: thompsonbry Date: 2013-11-11 17:27:34 +0000 (Mon, 11 Nov 2013) Log Message: ----------- increased timeout on await service start in test harness. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-11-11 15:31:24 UTC (rev 7526) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-11-11 17:27:34 UTC (rev 7527) @@ -1786,7 +1786,7 @@ } } - }); + }, 10, TimeUnit.SECONDS); return haGlue; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |