From: <tho...@us...> - 2013-12-03 17:58:47
Revision: 7601 http://bigdata.svn.sourceforge.net/bigdata/?rev=7601&view=rev Author: thompsonbry Date: 2013-12-03 17:58:35 +0000 (Tue, 03 Dec 2013) Log Message: ----------- Reconciling changes with Martyn for the postHACommit(), meta-bits maintenance on replication and leader failure, and in the HA test suite. Changes to: - StoreState interface - IHABufferStrategy.getStoreState() - RWStrategy.getStoreState() - WormStrategy.getStoreState() - FixedAllocator log @ WARN - RWStore (for unpacking more information from the new root block in postHACommit()). - HAJournalTest (for getStoreState() method on HAGlueTest). - AbstractHA3JournalServerTestCase.assertStoreStates() - TestHA3ChangeLeader (but disabled the kill-based test since this can trigger the socket resync problem and that code was not brought across) - BytesUtil.toHexString() allows a null argument. See #778. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/BytesUtil.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3DumpLogs.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/BytesUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/BytesUtil.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/BytesUtil.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -1576,6 +1576,9 @@ */ static public String toHexString(final byte[] buf) { + if (buf == null) + return "NULL"; + return toHexString(buf, buf.length); } @@ -1591,6 +1594,10 @@ * @return The hex string. */ static public String toHexString(final byte[] buf, int n) { + + if (buf == null) + return "NULL"; + n = n < buf.length ? 
n : buf.length; final StringBuffer out = new StringBuffer(); for (int i = 0; i < n; i++) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -23,11 +23,7 @@ */ package com.bigdata.journal; -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.EOFException; import java.io.IOException; -import java.io.InputStream; import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; @@ -42,8 +38,6 @@ import com.bigdata.mdi.IResourceMetadata; import com.bigdata.rawstore.AbstractRawWormStore; import com.bigdata.rawstore.Bytes; -import com.bigdata.rawstore.IAllocationContext; -import com.bigdata.rawstore.IPSOutputStream; import com.bigdata.rawstore.IRawStore; import com.bigdata.rawstore.WormAddressManager; import com.bigdata.resources.ResourceManager; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -280,4 +280,11 @@ */ WriteCacheService getWriteCacheService(); + /** + * A StoreState object references critical transient data that can be used + * to determine a degree of consistency between stores, specifically for an + * HA context. + */ + StoreState getStoreState(); + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -906,6 +906,11 @@ return m_store.getWriteCacheService(); } + @Override + public StoreState getStoreState() { + return m_store.getStoreState(); + } + // @Override // public boolean isFlushed() { // return m_store.isFlushed(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -2942,6 +2942,26 @@ // m_rebuildSequence++; } + @Override + public StoreState getStoreState() { + return new WormStoreState(); + } + + public static class WormStoreState implements StoreState { + + private static final long serialVersionUID = 1L; + + @Override + public boolean equals(final Object obj) { + if (obj == null || !(obj instanceof WormStoreState)) + return false; + final WormStoreState other = (WormStoreState) obj; + // Nothing to compare. 
+ return true; + } + + } + // @Override // public void prepareForRebuild(HARebuildRequest req) { // assert m_rebuildRequest == null; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -180,6 +180,8 @@ m_store.showWriteCacheDebug(paddr); + log.warn("Physical address " + paddr + " not accessible for Allocator of size " + m_size); + return 0L; } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -100,6 +100,7 @@ import com.bigdata.journal.IHABufferStrategy; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.RootBlockView; +import com.bigdata.journal.StoreState; import com.bigdata.journal.StoreTypeEnum; import com.bigdata.quorum.Quorum; import com.bigdata.quorum.QuorumException; @@ -1219,31 +1220,44 @@ } - /* - * Utility to encapsulate RootBlock interpreation + /** + * Utility to encapsulate RootBlock interpretation. */ - static class RootBlockInfo { + static private class RootBlockInfo { - static int nextAllocation(final IRootBlockView rb) { - final long nxtOffset = rb.getNextOffset(); - - // next allocation to be made (in -32K units). - final int ret = -(int) (nxtOffset >> 32); - - /* - * Skip the first 32K in the file. The root blocks live here but - * nothing else. - */ - return ret == 0 ? -(1 + META_ALLOCATION) : ret; - } +// int nextAllocation(final IRootBlockView rb) { +// final long nxtOffset = rb.getNextOffset(); +// +// // next allocation to be made (in -32K units). +// final int ret = -(int) (nxtOffset >> 32); +// +// /* +// * Skip the first 32K in the file. The root blocks live here but +// * nothing else. +// */ +// return ret == 0 ? -(1 + META_ALLOCATION) : ret; +// } - /* + /** + * Used to transparently re-open the backing channel if it has been closed + * by an interrupt during an IO. + */ + private final ReopenFileChannel m_reopener; + /** * Meta-Allocations stored as {int address; int[8] bits}, so each block * holds 8*32=256 allocation slots of 1K totaling 256K. 
- * + * <p> * The returned int array is a flattened list of these int[9] blocks */ - static int[] metabits(final IRootBlockView rb, final ReopenFileChannel reopener) throws IOException { + private final int[] m_metabits; + private final long m_storageStatsAddr; + private final long m_lastDeferredReleaseTime; + + RootBlockInfo(final IRootBlockView rb, + final ReopenFileChannel reopener) throws IOException { + + this.m_reopener = reopener; + final long rawmbaddr = rb.getMetaBitsAddr(); /* @@ -1265,17 +1279,17 @@ */ final byte[] buf = new byte[metaBitsStore * 4]; - FileChannelUtility.readAll(reopener, ByteBuffer.wrap(buf), pmaddr); + FileChannelUtility.readAll(m_reopener, ByteBuffer.wrap(buf), pmaddr); final DataInputStream strBuf = new DataInputStream(new ByteArrayInputStream(buf)); // Can handle minor store version incompatibility strBuf.readInt(); // STORE VERSION - strBuf.readLong(); // Last Deferred Release Time + m_lastDeferredReleaseTime = strBuf.readLong(); // Last Deferred Release Time strBuf.readInt(); // cDefaultMetaBitsSize final int allocBlocks = strBuf.readInt(); - strBuf.readLong(); // m_storageStatsAddr + m_storageStatsAddr = strBuf.readLong(); // m_storageStatsAddr // step over those reserved ints for (int i = 0; i < cReservedMetaBits; i++) { @@ -1291,7 +1305,7 @@ // Must be multiple of 9 assert metaBitsSize % 9 == 0; - int[] ret = new int[metaBitsSize]; + final int[] ret = new int[metaBitsSize]; for (int i = 0; i < metaBitsSize; i++) { ret[i] = strBuf.readInt(); } @@ -1300,8 +1314,9 @@ * Meta-Allocations stored as {int address; int[8] bits}, so each block * holds 8*32=256 allocation slots of 1K totaling 256K. */ - return ret; + m_metabits = ret; } + } /** @@ -3157,6 +3172,13 @@ log.trace("commitChanges for: " + m_nextAllocation + ", " + m_metaBitsAddr + ", active contexts: " + m_contexts.size()); + + if (log.isDebugEnabled() && m_quorum.isHighlyAvailable()) { + + log.debug(showAllocatorList()); + + } + } /** @@ -6216,14 +6238,40 @@ log.trace("Allocator " + index + ", size: " + xfa.m_size + ", startAddress: " + xfa.getStartAddr() + ", allocated: " + (xfa.getAllocatedSlots()/xfa.m_size)); } } - + + // Update m_metaBits addr and m_nextAllocation to ensure able to allocate as well as read! + { + final long nxtOffset = rbv.getNextOffset(); + + // next allocation to be made (in -32K units). + m_nextAllocation = -(int) (nxtOffset >> 32); + + if (m_nextAllocation == 0) { + throw new IllegalStateException("Invalid state for non-empty store"); + } + + m_committedNextAllocation = m_nextAllocation; + + final long savedMetaBitsAddr = m_metaBitsAddr; + // latched offset of the metabits region. + m_metaBitsAddr = -(int) nxtOffset; + + if (savedMetaBitsAddr != m_metaBitsAddr) + log.warn("Old metaBitsAddr: " + savedMetaBitsAddr + ", new metaBitsAddr: " + m_metaBitsAddr); + } + final ArrayList<FixedAllocator> nallocs = new ArrayList<FixedAllocator>(); // current metabits final int[] oldmetabits = m_metaBits; // new metabits - m_metaBits = RootBlockInfo.metabits(rbv, m_reopener); + final RootBlockInfo rbi = new RootBlockInfo(rbv, m_reopener); + m_metaBits = rbi.m_metabits; + // and grab the last deferred release and storageStats! 
+ m_lastDeferredReleaseTime = rbi.m_lastDeferredReleaseTime; + m_storageStatsAddr = rbi.m_storageStatsAddr; + if(log.isTraceEnabled()) log.trace("Metabits length: " + m_metaBits.length); @@ -6925,6 +6973,16 @@ } + private String showAllocatorList() { + final StringBuilder sb = new StringBuilder(); + + for (int index = 0; index < m_allocs.size(); index++) { + final FixedAllocator xfa = m_allocs.get(index); + sb.append("Allocator " + index + ", size: " + xfa.m_size + ", startAddress: " + xfa.getStartAddr() + ", allocated: " + xfa.getAllocatedSlots() + "\n"); + } + + return sb.toString(); + } // /** // * // * @return whether WCS is flushed @@ -6935,6 +6993,79 @@ // return this.m_writeCacheService.isFlushed(); // } + public static class RWStoreState implements StoreState { + + /** + * Generated ID + */ + private static final long serialVersionUID = 4315400143557397323L; + + /* + * Transient state necessary for consistent ha leader transition + */ + private final int m_fileSize; + private final int m_nextAllocation; + private final int m_committedNextAllocation; + private final long m_minReleaseAge; + private final long m_lastDeferredReleaseTime; + private final long m_storageStatsAddr; + private final int m_allocsSize; + private final int m_metaBitsAddr; + private final int m_metaBitsSize; + + private RWStoreState(final RWStore store) { + m_fileSize = store.m_fileSize; + m_nextAllocation = store.m_nextAllocation; + m_committedNextAllocation = store.m_committedNextAllocation; + m_minReleaseAge = store.m_minReleaseAge; + m_lastDeferredReleaseTime = store.m_lastDeferredReleaseTime; + m_storageStatsAddr = store.m_storageStatsAddr; + m_allocsSize = store.m_allocs.size(); + m_metaBitsAddr = store.m_metaBitsAddr; + m_metaBitsSize = store.m_metaBits.length; + } + + @Override + public boolean equals(final Object obj) { + if (obj == null || !(obj instanceof RWStoreState)) + return false; + final RWStoreState other = (RWStoreState) obj; + return m_fileSize == other.m_fileSize + && m_nextAllocation == other.m_nextAllocation + && m_committedNextAllocation == other.m_committedNextAllocation + && m_minReleaseAge == other.m_minReleaseAge + && m_lastDeferredReleaseTime == other.m_lastDeferredReleaseTime + && m_storageStatsAddr == other.m_storageStatsAddr + && m_allocsSize == other.m_allocsSize + && m_metaBitsAddr == other.m_metaBitsAddr + && m_metaBitsSize == other.m_metaBitsSize; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(); + + sb.append("RWStoreState\n"); + sb.append("fileSize: " + m_fileSize + "\n"); + sb.append("nextAllocation: " + m_nextAllocation + "\n"); + sb.append("committedNextAllocation: " + m_committedNextAllocation + "\n"); + sb.append("minReleaseAge: " + m_minReleaseAge + "\n"); + sb.append("lastDeferredReleaseTime: " + m_lastDeferredReleaseTime + "\n"); + sb.append("storageStatsAddr: " + m_storageStatsAddr + "\n"); + sb.append("allocsSize: " + m_allocsSize + "\n"); + sb.append("metaBitsAddr: " + m_metaBitsAddr + "\n"); + sb.append("metaBitsSize: " + m_metaBitsSize + "\n"); + + return sb.toString(); + } + } + + public StoreState getStoreState() { + final RWStoreState ret = new RWStoreState(this); + + return ret; + } + // public void prepareForRebuild(final HARebuildRequest req) { // assert m_rebuildRequest == null; // Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- 
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -85,6 +85,7 @@ import com.bigdata.jini.util.ConfigMath; import com.bigdata.jini.util.JiniUtil; import com.bigdata.journal.IRootBlockView; +import com.bigdata.journal.StoreState; import com.bigdata.journal.jini.ha.HAJournalServer.ConfigurationOptions; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; import com.bigdata.quorum.AbstractQuorumClient; @@ -2688,6 +2689,39 @@ } + protected void assertStoreStates(final HAGlue[] services) throws IOException { + if (services.length < 2) + return; // nothing to compare + + final StoreState test = ((HAGlueTest) services[0]).getStoreState(); + final String tname = serviceName(services[0]); + + for (int s = 1; s < services.length; s++) { + final StoreState other = ((HAGlueTest) services[s]).getStoreState(); + + if (!test.equals(other)) { + final String oname = serviceName(services[s]); + final String msg = "StoreState mismatch \n" + tname + "\n" + + test.toString() + "\n" + oname + "\n" + + other.toString(); + fail(msg); + } + } + } + + protected String serviceName(final HAGlue s) { + if (s == serverA) { + return "serverA"; + } else if (s == serverB) { + return "serverB"; + } else if (s == serverC) { + return "serverC"; + } else { + return "NA"; + } + } + + /** * Task loads a large data set. */ @@ -2695,6 +2729,7 @@ private final long token; private final boolean reallyLargeLoad; + private final boolean dropAll; /** * Large load. @@ -2708,6 +2743,10 @@ } + + public LargeLoadTask(long token, boolean reallyLargeLoad) { + this(token, reallyLargeLoad, true/*dropAll*/); + } /** * Either large or really large load. * @@ -2716,17 +2755,20 @@ * @param reallyLargeLoad * if we will also load the 3 degrees of freedom file. 
*/ - public LargeLoadTask(final long token, final boolean reallyLargeLoad) { + public LargeLoadTask(final long token, final boolean reallyLargeLoad, final boolean dropAll) { this.token = token; this.reallyLargeLoad = reallyLargeLoad; + this.dropAll = dropAll; + } public Void call() throws Exception { final StringBuilder sb = new StringBuilder(); + if (dropAll) sb.append("DROP ALL;\n"); sb.append("LOAD <" + getFoafFileUrl("data-0.nq.gz") + ">;\n"); sb.append("LOAD <" + getFoafFileUrl("data-1.nq.gz") + ">;\n"); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -84,8 +84,10 @@ import com.bigdata.ha.msg.IHAWriteSetStateRequest; import com.bigdata.ha.msg.IHAWriteSetStateResponse; import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IHABufferStrategy; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.ITx; +import com.bigdata.journal.StoreState; import com.bigdata.journal.jini.ha.HAJournalServer.HAQuorumService; import com.bigdata.journal.jini.ha.HAJournalServer.RunStateEnum; import com.bigdata.quorum.AsynchronousQuorumCloseException; @@ -308,6 +310,11 @@ */ public void simpleTransaction_abort() throws IOException, Exception; + /** + * Supports consistency checking between HA services + */ + public StoreState getStoreState() throws IOException; + } /** @@ -1164,6 +1171,12 @@ } + @Override + public StoreState getStoreState() throws IOException { + return ((IHABufferStrategy) (getIndexManager().getBufferStrategy())) + .getStoreState(); + } + // @Override // public Future<Void> dropZookeeperConnection() throws IOException { // Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -1,34 +1,21 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2013. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ package com.bigdata.journal.jini.ha; +import java.util.Random; +import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; import com.bigdata.ha.HAGlue; +import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; public class TestHA3ChangeLeader extends AbstractHA3JournalServerTestCase { + public TestHA3ChangeLeader() { + } + + public TestHA3ChangeLeader(String name) { + super(name); + } + /** * We have seen problems with updates when the leader changes, this test reconstructs * this simple scenario, with and update transaction, change of leader and then a @@ -38,6 +25,32 @@ */ public void testStartABC_ChangeLeader() throws Exception { + doStartABC_ChangeLeader(1); + } + + public void testStartABC_ChangeLeader_2Trans() throws Exception { + + doStartABC_ChangeLeader(2); + } + + public void testStartABC_ChangeLeader_3Trans() throws Exception { + + doStartABC_ChangeLeader(3); + } + + public void testStartABC_ChangeLeader_RandomTrans() throws Exception { + final Random r = new Random(); + final int ntrans = r.nextInt(900); + try { + doStartABC_ChangeLeader(ntrans); + } catch (Exception e) { + log.error("Problem with " + ntrans + " transactions"); + throw e; + } + } + + public void doStartABC_ChangeLeader(final int ntrans) throws Exception { + // Start 3 services final HAGlue serverA = startA(); final HAGlue serverB = startB(); @@ -53,19 +66,131 @@ awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); /* - * Now go through a commit point with a met quorum. The HALog - * files should be retained at that commit point. + * Now go through sevearl commit points with a met quorum. The HALog + * files should be retained at the final commit point. */ - simpleTransaction(); + for (int t = 0; t < ntrans; t++) { + simpleTransaction(); + } shutdownA(); final long token2 = awaitNextQuorumMeet(token1); + + // let's commit several transactions with the new leader + for (int t = 0; t < 20; t++) { + simpleTransaction(); + // Check store states + assertStoreStates(new HAGlue[] { serverB, serverC }); + } + + // And again verify binary equality of ALL journals. + assertDigestsEquals(new HAGlue[] { serverB, serverC }); + + assertTrue(token2 == quorum.token()); + } + + public void testStartABC_KillLeader_RandomTrans() throws Exception { + fail("Test disabled pending reconcilation of socket ticket"); + final Random r = new Random(); + final int ntrans = r.nextInt(900); + try { + doStartABC_KillLeader(ntrans); + } catch (Exception e) { + log.error("Problem with " + ntrans + " transactions"); + throw e; + } + } + + private void doStartABC_KillLeader(final int ntrans) throws Exception { + + // Start 3 services + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + final HAGlue serverC = startC(); + + // Wait for a quorum meet. + final long token1 = awaitFullyMetQuorum(); + + // await pipeline + awaitPipeline(20, TimeUnit.SECONDS, new HAGlue[] { serverA, serverB, + serverC }); + + awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); + + /* + * Now go through sevearl commit points with a met quorum. The HALog + * files should be retained at the final commit point. 
+ */ + for (int t = 0; t < ntrans; t++) { + simpleTransaction(); + + // Check store states + assertStoreStates(new HAGlueTest[] { (HAGlueTest) serverA, (HAGlueTest) serverB, (HAGlueTest) serverC }); + } + + kill(serverA); + + final long token2 = awaitNextQuorumMeet(token1); + // let's commit several transactions with the new leader + for (int t = 0; t < 20; t++) { + simpleTransaction(); + } + + // And again verify binary equality of ALL journals. + assertDigestsEquals(new HAGlue[] { serverB, serverC }); + + assertStoreStates(new HAGlueTest[] { (HAGlueTest) serverB, (HAGlueTest) serverC }); + assertTrue(token2 == quorum.token()); + } + /** + * Similar to ChangeLeader but with a LargeLoad + */ + public void _testStartABC_StressChangeLeader() throws Exception { + + // Start 3 services + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + final HAGlue serverC = startC(); + + // Wait for a quorum meet. + final long token1 = awaitFullyMetQuorum(); + + // await pipeline + awaitPipeline(20, TimeUnit.SECONDS, new HAGlue[] { serverA, serverB, + serverC }); + + awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); + + /* + * LOAD data on leader. + */ + for (int i = 0; i < 100; i++) { + final FutureTask<Void> ft = new FutureTask<Void>(new LargeLoadTask( + token1, true/* reallyLargeLoad */, false/*dropAll*/)); + + // Start LOAD. + executorService.submit(ft); + + // Await LOAD, but with a timeout. + ft.get(longLoadTimeoutMillis, TimeUnit.MILLISECONDS); + } + + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); + + shutdownA(); + + final long token2 = awaitNextQuorumMeet(token1); + simpleTransaction(); // And again verify binary equality of ALL journals. assertDigestsEquals(new HAGlue[] { serverB, serverC }); + + assertStoreStates(new HAGlue[] { serverB, serverC }); + + assertTrue(token2 == quorum.token()); } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3DumpLogs.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3DumpLogs.java 2013-11-27 14:24:34 UTC (rev 7600) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3DumpLogs.java 2013-12-03 17:58:35 UTC (rev 7601) @@ -32,6 +32,12 @@ import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAStatusEnum; +/** + * FIXME This test suite has known limitations and the utility class that it + * tests needs a code review and revision. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ public class TestHA3DumpLogs extends AbstractHA3JournalServerTestCase { @Override This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
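The r7601 change set above introduces a per-buffer-strategy StoreState snapshot plus an assertStoreStates() helper so the HA3 tests can detect when two journals have drifted apart in their transient allocation state, instead of waiting for a later digest mismatch. Below is a minimal, self-contained sketch of that equals-based snapshot comparison; the names StoreStateSnapshot and assertAllEqual are hypothetical stand-ins rather than bigdata API, and only a few of the fields tracked by RWStoreState are shown:

// Hypothetical, simplified analogue of RWStore.RWStoreState and
// AbstractHA3JournalServerTestCase.assertStoreStates() from r7601.
import java.io.Serializable;
import java.util.List;
import java.util.Objects;

final class StoreStateSnapshot implements Serializable {

    private static final long serialVersionUID = 1L;

    // A subset of the transient fields compared by RWStoreState.
    private final int fileSize;
    private final int nextAllocation;
    private final long lastDeferredReleaseTime;

    StoreStateSnapshot(final int fileSize, final int nextAllocation,
            final long lastDeferredReleaseTime) {
        this.fileSize = fileSize;
        this.nextAllocation = nextAllocation;
        this.lastDeferredReleaseTime = lastDeferredReleaseTime;
    }

    @Override
    public boolean equals(final Object obj) {
        if (!(obj instanceof StoreStateSnapshot))
            return false;
        final StoreStateSnapshot o = (StoreStateSnapshot) obj;
        return fileSize == o.fileSize
                && nextAllocation == o.nextAllocation
                && lastDeferredReleaseTime == o.lastDeferredReleaseTime;
    }

    @Override
    public int hashCode() {
        return Objects.hash(fileSize, nextAllocation, lastDeferredReleaseTime);
    }

    @Override
    public String toString() {
        return "fileSize=" + fileSize + ", nextAllocation=" + nextAllocation
                + ", lastDeferredReleaseTime=" + lastDeferredReleaseTime;
    }

    // Compare every snapshot against the first and fail on the first
    // mismatch, mirroring the assertStoreStates() test helper.
    static void assertAllEqual(final List<StoreStateSnapshot> states) {
        if (states.size() < 2)
            return; // nothing to compare
        final StoreStateSnapshot expected = states.get(0);
        for (int i = 1; i < states.size(); i++) {
            if (!expected.equals(states.get(i)))
                throw new AssertionError("StoreState mismatch:\n" + expected
                        + "\n" + states.get(i));
        }
    }
}

In the committed tests the snapshots are fetched remotely via HAGlueTest.getStoreState() and the check runs alongside the existing digest comparison; the same field set also feeds RWStoreState.toString(), which is what produces the field-by-field failure message.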
From: <mar...@us...> - 2013-11-27 14:24:42
Revision: 7600 http://bigdata.svn.sourceforge.net/bigdata/?rev=7600&view=rev Author: martyncutcher Date: 2013-11-27 14:24:34 +0000 (Wed, 27 Nov 2013) Log Message: ----------- Adding missing Unit tests! Modified Paths: -------------- branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java Added Paths: ----------- branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestStressKill.java Added: branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java (rev 0) +++ branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java 2013-11-27 14:24:34 UTC (rev 7600) @@ -0,0 +1,191 @@ +package com.bigdata.journal.jini.ha; + +import java.util.Random; +import java.util.concurrent.FutureTask; +import java.util.concurrent.TimeUnit; + +import com.bigdata.ha.HAGlue; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.LargeLoadTask; +import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; + +public class TestHA3ChangeLeader extends AbstractHA3JournalServerTestCase { + + public TestHA3ChangeLeader() { + } + + public TestHA3ChangeLeader(String name) { + super(name); + } + + /** + * We have seen problems with updates when the leader changes, this test reconstructs + * this simple scenario, with and update transaction, change of leader and then a + * second update transaction. + * + * @throws Exception + */ + public void testStartABC_ChangeLeader() throws Exception { + + doStartABC_ChangeLeader(1); + } + + public void testStartABC_ChangeLeader_3Trans() throws Exception { + + doStartABC_ChangeLeader(2); + } + + public void testStartABC_ChangeLeader_RandomTrans() throws Exception { + final Random r = new Random(); + final int ntrans = r.nextInt(900); + try { + doStartABC_ChangeLeader(ntrans); + } catch (Exception e) { + System.err.println("Problem with " + ntrans + " transactions"); + throw e; + } + } + + public void doStartABC_ChangeLeader(final int ntrans) throws Exception { + + // Start 3 services + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + final HAGlue serverC = startC(); + + // Wait for a quorum meet. + final long token1 = awaitFullyMetQuorum(); + + // await pipeline + awaitPipeline(20, TimeUnit.SECONDS, new HAGlue[] { serverA, serverB, + serverC }); + + awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); + + /* + * Now go through sevearl commit points with a met quorum. The HALog + * files should be retained at the final commit point. + */ + for (int t = 0; t < ntrans; t++) { + simpleTransaction(); + } + + shutdownA(); + + final long token2 = awaitNextQuorumMeet(token1); + + // let's commit several transactions with the new leader + for (int t = 0; t < 20; t++) { + simpleTransaction(); + // Check store states + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); + } + + // And again verify binary equality of ALL journals. 
+ assertDigestsEquals(new HAGlue[] { serverB, serverC }); + + assertTrue(token2 == quorum.token()); + } + + public void testStartABC_KillLeader_RandomTrans() throws Exception { + final Random r = new Random(); + final int ntrans = r.nextInt(900); + try { + doStartABC_KillLeader(ntrans); + } catch (Exception e) { + System.err.println("Problem with " + ntrans + " transactions"); + throw e; + } + } + + public void doStartABC_KillLeader(final int ntrans) throws Exception { + + // Start 3 services + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + final HAGlue serverC = startC(); + + // Wait for a quorum meet. + final long token1 = awaitFullyMetQuorum(); + + // await pipeline + awaitPipeline(20, TimeUnit.SECONDS, new HAGlue[] { serverA, serverB, + serverC }); + + awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); + + /* + * Now go through sevearl commit points with a met quorum. The HALog + * files should be retained at the final commit point. + */ + for (int t = 0; t < ntrans; t++) { + simpleTransaction(); + + // Check store states + assertStoreStates(new HAGlueTest[] { (HAGlueTest) serverA, (HAGlueTest) serverB, (HAGlueTest) serverC }); + } + + kill(serverA); + + final long token2 = awaitNextQuorumMeet(token1); + + // let's commit several transactions with the new leader + for (int t = 0; t < 20; t++) { + simpleTransaction(); + } + + // And again verify binary equality of ALL journals. + assertDigestsEquals(new HAGlue[] { serverB, serverC }); + + assertStoreStates(new HAGlueTest[] { (HAGlueTest) serverB, (HAGlueTest) serverC }); + assertTrue(token2 == quorum.token()); + } + /* + * Similar to ChangeLeader but with a LargeLoad + */ + public void testStartABC_StressChangeLeader() throws Exception { + + // Start 3 services + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + final HAGlue serverC = startC(); + + // Wait for a quorum meet. + final long token1 = awaitFullyMetQuorum(); + + // await pipeline + awaitPipeline(20, TimeUnit.SECONDS, new HAGlue[] { serverA, serverB, + serverC }); + + awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); + + /* + * LOAD data on leader. + */ + for (int i = 0; i < 100; i++) { + final FutureTask<Void> ft = new FutureTask<Void>(new LargeLoadTask( + token1, true/* reallyLargeLoad */, false/*dropAll*/)); + + // Start LOAD. + executorService.submit(ft); + + // Await LOAD, but with a timeout. + ft.get(longLoadTimeoutMillis, TimeUnit.MILLISECONDS); + } + + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); + + shutdownA(); + + final long token2 = awaitNextQuorumMeet(token1); + + simpleTransaction(); + + // And again verify binary equality of ALL journals. + assertDigestsEquals(new HAGlue[] { serverB, serverC }); + + assertStoreStates(new HAGlue[] { serverB, serverC }); + + assertTrue(token2 == quorum.token()); + + } +} Modified: branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-11-27 14:23:29 UTC (rev 7599) +++ branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-11-27 14:24:34 UTC (rev 7600) @@ -194,7 +194,9 @@ awaitHAStatus(serverB, HAStatusEnum.Follower); awaitHAStatus(serverC, HAStatusEnum.Follower); - // The commit counter has not changed. 
+ assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); + + // The commit counter has not changed. assertEquals( lastCommitCounter, serverA.getRootBlock( @@ -543,9 +545,15 @@ // Verify binary equality of ALL journals. assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + // Check transient store states after resync + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); + // Now force further commit when fully met to remove log files simpleTransaction(); + // Check transient store states after commit + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); + // And again verify binary equality of ALL journals. assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); @@ -559,6 +567,46 @@ } /** + * We have seen problems with updates when the leader changes, this test reconstructs + * this simple scenario, with and update transaction, change of leader and then a + * second update transaction. + * + * @throws Exception + */ + public void testStartABC_ChangeLeader() throws Exception { + + // Start 3 services + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + final HAGlue serverC = startC(); + + // Wait for a quorum meet. + final long token1 = quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); + + // await pipeline + awaitPipeline(20, TimeUnit.SECONDS, new HAGlue[] {serverA, serverB, serverC}); + + /* + * Now go through a commit point with a met quorum. The HALog + * files should be retained at that commit point. + */ + simpleTransaction(); + + shutdownA(); + + final long token2 = quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); + assertFalse(token1 == token2); + + simpleTransaction(); + + // And again verify binary equality of ALL journals. + assertDigestsEquals(new HAGlue[] { serverB, serverC }); + + } + + /** * Unit test of the ability to go through a simultaneous restart of all * services once those services are no longer at commit point 0. Two * services will meet on the lastCommitTime. The third will need to RESYNC @@ -883,6 +931,8 @@ HAStatusEnum.Follower, HAStatusEnum.Follower }, new HAGlue[] { serverA, serverB, serverC }); + // Check store states + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); /* * Verify binary equality of ALL journals. * @@ -893,7 +943,7 @@ * could result in the leader not including the newly joined * follower in the 2-phase commit. */ - assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); // Now force further commit when fully met to remove log files simpleTransaction(); @@ -901,7 +951,10 @@ // And again verify binary equality of ALL journals. // assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); - // Now verify no HALog files since fully met quorum @ commit. + // Check store states + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); + + // Now verify no HALog files since fully met quorum @ commit. final long lastCommitCounter3 = leader .getRootBlock(new HARootBlockRequest(null/* storeUUID */)) .getRootBlock().getCommitCounter(); @@ -1019,6 +1072,8 @@ // And again verify binary equality of ALL journals. 
assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); + log.info("ALL GOOD!"); } @@ -1291,6 +1346,8 @@ assertHALogNotFound(0L/* firstCommitCounter */, lastCommitCounter3, new HAGlue[] { serverA, serverB, serverC }); + // Check store states + assertStoreStates(new HAGlueTest[] { (HAGlueTest) serverA, (HAGlueTest) serverB, (HAGlueTest) serverC }); } /** @@ -1639,6 +1696,7 @@ // Verify binary equality of ALL journals. assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); /* * Now go through a commit point with a fully met quorum. The HALog * files should be purged at that commit point. @@ -1676,6 +1734,8 @@ // ...and with original token assertTrue(token == quorum.token()); + + assertStoreStates(new HAGlue[] { serverA, serverB}); } /** @@ -1712,6 +1772,7 @@ assertTrue(leader.equals(startup.serverB) || leader.equals(startup.serverC)); + assertStoreStates(new HAGlue[] { startup.serverB, startup.serverC }); } /** @@ -1748,6 +1809,7 @@ assertTrue(leader.equals(startup.serverB) || leader.equals(startup.serverC)); + assertStoreStates(new HAGlue[] { startup.serverB, startup.serverC }); } /** @@ -2352,6 +2414,7 @@ assertDigestsEquals(new HAGlue[] { services.serverA, services.serverB, services.serverC }); + assertStoreStates(new HAGlue[] { services.serverA, services.serverB, services.serverC }); } finally { destroyAll(); @@ -2858,6 +2921,9 @@ // Verify quorum becomes fully met now that LOAD is done. assertEquals(token, awaitFullyMetQuorum()); + + // Check store states + assertStoreStates(new HAGlue[] { startup.serverA, startup.serverB, serverC2 }); } /** @@ -3033,6 +3099,8 @@ // Verify quorum becomes fully met now that LOAD is done. assertEquals(token, awaitFullyMetQuorum()); + // Check store states + assertStoreStates(new HAGlue[] { startup.serverA, serverB2, startup.serverC }); } /** @@ -3085,6 +3153,8 @@ // Verify fully met. assertTrue(quorum.isQuorumFullyMet(token)); + serverB2.awaitHAReady(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + // Await LOAD, but with a timeout. 
ft.get(longLoadTimeoutMillis, TimeUnit.MILLISECONDS); @@ -3108,7 +3178,8 @@ // log.warn("Result Leader: " + resultLeader.next().getBinding("count")); // } - // assertDigestsEquals(new HAGlue[] { startup.serverA, serverB2, startup.serverC }); + // Check store states after load and final transaction + assertStoreStates(new HAGlue[] { startup.serverA, serverB2, startup.serverC }); } /** @@ -3480,7 +3551,7 @@ } - public void _testStressQuorumABC_HAStatusUpdatesWithFailovers() + public void testStressQuorumABC_HAStatusUpdatesWithFailovers() throws Exception { for (int i = 1; i <= 20; i++) { try { Added: branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java (rev 0) +++ branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java 2013-11-27 14:24:34 UTC (rev 7600) @@ -0,0 +1,151 @@ +package com.bigdata.journal.jini.ha; + +import java.util.concurrent.FutureTask; +import java.util.concurrent.TimeUnit; + +import com.bigdata.ha.HAGlue; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ABC; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.LargeLoadTask; + +import net.jini.config.Configuration; +import junit.framework.TestCase; + +public class TestHA3JustKills extends AbstractHA3JournalServerTestCase { + + + /** + * {@inheritDoc} + * <p> + * Note: This overrides some {@link Configuration} values for the + * {@link HAJournalServer} in order to establish conditions suitable for + * testing the {@link ISnapshotPolicy} and {@link IRestorePolicy}. + */ + @Override + protected String[] getOverrides() { + + return new String[]{ +// "com.bigdata.journal.HAJournal.properties=" +TestHA3JournalServer.getTestHAJournalProperties(com.bigdata.journal.HAJournal.properties), + "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", + "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", +// "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"", + "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", + }; + + } + + public TestHA3JustKills() { + } + + public TestHA3JustKills(String name) { + super(name); + } + + /** + * Start A+B+C in strict sequence. Wait until the quorum fully meets. Start + * a long running LOAD. While the LOAD is running, sure kill C (the last + * follower). Verify that the LOAD completes successfully with the remaining + * services (A+B). + */ + public void testABC_LiveLoadRemainsMet_kill_C() throws Exception { + + // enforce join order + final ABC startup = new ABC(true /*sequential*/); + + final long token = awaitFullyMetQuorum(); + + // start concurrent task loads that continue until fully met + final FutureTask<Void> ft = new FutureTask<Void>(new LargeLoadTask( + token)); + + executorService.submit(ft); + + // allow load head start + Thread.sleep(300/* ms */); + + // Verify load is still running. 
+ assertFalse(ft.isDone()); + + // Dump Zookeeper + log.warn("ZOOKEEPER\n" + dumpZoo()); + + kill(startup.serverC); + + awaitPipeline(20, TimeUnit.SECONDS, new HAGlue[] {startup.serverA, startup.serverB}); + + // token must remain unchanged to indicate same quorum + assertEquals(token, awaitMetQuorum()); + + awaitMembers(new HAGlue[] {startup.serverA, startup.serverB}); + awaitJoined(new HAGlue[] {startup.serverA, startup.serverB}); + + // token must remain unchanged to indicate same quorum + assertEquals(token, awaitMetQuorum()); + + // Await LOAD, but with a timeout. + ft.get(longLoadTimeoutMillis, TimeUnit.MILLISECONDS); + + // token must remain unchanged to indicate same quorum + assertEquals(token, awaitMetQuorum()); + + } + + public void testStressABC_LiveLoadRemainsMet_kill_C() throws Exception { + for (int i = 0; i < 5; i++) { + try { + testABC_LiveLoadRemainsMet_kill_C(); + } catch (Throwable t) { + fail("Run " + i, t); + } finally { + Thread.sleep(1000); + destroyAll(); + } + } + } + + /** + * Start A+B+C in strict sequence. Wait until the quorum fully meets. Start + * a long running LOAD. While the LOAD is running, sure kill B (the first + * follower). Verify that the LOAD completes successfully with the remaining + * services (A+C), after the leader re-orders the pipeline. + */ + public void testABC_LiveLoadRemainsMet_kill_B() throws Exception { + + // enforce join order + final ABC startup = new ABC(true /*sequential*/); + + final long token = awaitFullyMetQuorum(); + + // start concurrent task loads that continue until fully met + final FutureTask<Void> ft = new FutureTask<Void>(new LargeLoadTask( + token)); + + executorService.submit(ft); + + // allow load head start + Thread.sleep(300/* ms */); + + // Verify load is still running. + assertFalse(ft.isDone()); + + // Dump Zookeeper + log.warn("ZOOKEEPER\n" + dumpZoo()); + + kill(startup.serverB); + + awaitPipeline(10, TimeUnit.SECONDS, new HAGlue[] {startup.serverA, startup.serverC}); + + // also check members and joined + awaitMembers(new HAGlue[] {startup.serverA, startup.serverC}); + awaitJoined(new HAGlue[] {startup.serverA, startup.serverC}); + + // token must remain unchanged to indicate same quorum + assertEquals(token, awaitMetQuorum()); + + // Await LOAD, but with a timeout. + ft.get(longLoadTimeoutMillis, TimeUnit.MILLISECONDS); + + // token must remain unchanged to indicate same quorum + assertEquals(token, awaitMetQuorum()); + + } +} Added: branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestStressKill.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestStressKill.java (rev 0) +++ branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestStressKill.java 2013-11-27 14:24:34 UTC (rev 7600) @@ -0,0 +1,22 @@ +package com.bigdata.journal.jini.ha; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +public class TestStressKill extends TestCase { + + public static Test suite() + { + + final TestSuite suite = new TestSuite("TestStressKill"); + + // commitTime => (HALog|Snapshot)Record test suites. + for (int i = 0; i < 100; i++) { + suite.addTestSuite(TestHA3JustKills.class); + } + + return suite; + } + +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
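The tests added in r7600 above (TestHA3JustKills and the TestStressKill wrapper) follow a common failure-injection shape: start a long-running LOAD in the background, give it a head start, sure-kill one service, assert that the quorum token is unchanged, and then require the LOAD to finish within a timeout. A stripped-down, framework-free sketch of that shape follows; the token values and injectFailure() are placeholders for awaitFullyMetQuorum()/awaitMetQuorum() and kill(...), which are not reproduced here:

// Hypothetical skeleton of the "kill a follower during a live load" tests
// added in r7600: a background load must survive an injected failure, and
// an invariant (the quorum token) must be unchanged afterwards.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;

public class LiveLoadFailureInjectionSketch {

    public static void main(final String[] args) throws Exception {

        final ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            final long tokenBefore = 42L; // stand-in for awaitFullyMetQuorum()

            // Stand-in for LargeLoadTask: any long-running unit of work.
            final FutureTask<Void> load = new FutureTask<Void>(() -> {
                Thread.sleep(2000); // simulate the LOAD
                return null;
            });
            executor.submit(load);

            // Give the load a head start, as the test does (300 ms).
            Thread.sleep(300);
            if (load.isDone())
                throw new AssertionError("load finished before the failure was injected");

            injectFailure(); // stand-in for kill(startup.serverB) or kill(startup.serverC)

            // Invariant: same token afterwards, i.e. the quorum remained met.
            final long tokenAfter = 42L; // stand-in for awaitMetQuorum()
            if (tokenBefore != tokenAfter)
                throw new AssertionError("quorum token changed across the kill");

            // The load must still complete, but bounded by a timeout.
            load.get(10, TimeUnit.SECONDS);

            System.out.println("load survived the injected failure");
        } finally {
            executor.shutdownNow();
        }
    }

    private static void injectFailure() {
        // No-op in this sketch; the real tests sure-kill the service process
        // and then await the re-ordered pipeline before re-checking the token.
    }
}

TestStressKill then wraps this scenario in a JUnit suite that adds the same test class 100 times, so an intermittent socket-resync race has many chances to reproduce in CI.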
From: <mar...@us...> - 2013-11-27 14:23:38
Revision: 7599 http://bigdata.svn.sourceforge.net/bigdata/?rev=7599&view=rev Author: martyncutcher Date: 2013-11-27 14:23:29 +0000 (Wed, 27 Nov 2013) Log Message: ----------- Updates for pipeline resync and postHACommit for jenkins CI job submission Modified Paths: -------------- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/btree/BTree.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/BasicBufferStrategy.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/DumpJournal.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/sector/MemStrategy.java branches/PIPELINE_RESYNC/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java branches/PIPELINE_RESYNC/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/PIPELINE_RESYNC/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/btree/BTree.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/btree/BTree.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/btree/BTree.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -1807,6 +1807,9 @@ final Checkpoint checkpoint; try { checkpoint = Checkpoint.load(store, addrCheckpoint); + + if (log.isDebugEnabled()) + log.debug("Checkpoint rootAddr: " + checkpoint.getRootAddr()); } catch (Throwable t) { throw new RuntimeException("Could not load Checkpoint: store=" + store + ", addrCheckpoint=" Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -1474,19 +1474,20 @@ } catch (Throwable t) { - final PipelineException pe = (PipelineException) InnerCause.getInnerCause(t, PipelineException.class); - if (pe != null) { - log.error("Really need to remove service " + pe.getProblemServiceId()); - final UUID psid = pe.getProblemServiceId(); - - try { - member.getActor().forceRemoveService(psid); - } catch (Exception e) { - log.warn("Problem on node removal", e); - - throw new RuntimeException(e); - } - } + // ORIGINAL TESTED GREEN for KillB and KillC +// final PipelineException pe = (PipelineException) InnerCause.getInnerCause(t, PipelineException.class); +// if (pe != null) { +// log.error("Really need to remove service " + pe.getProblemServiceId()); +// final UUID 
psid = pe.getProblemServiceId(); +// +// try { +// member.getActor().forceRemoveService(psid); +// } catch (Exception e) { +// log.warn("Problem on node removal", e); +// +// throw new RuntimeException(e); +// } +// } // Note: Also see retrySend()'s catch block. @@ -1560,7 +1561,25 @@ return; - } finally { + } catch (Exception t) { + + // THIS LOCATION WORKS! +// final PipelineException pe = (PipelineException) InnerCause.getInnerCause(t, PipelineException.class); +// if (pe != null) { +// log.error("Really need to remove service " + pe.getProblemServiceId()); +// final UUID psid = pe.getProblemServiceId(); +// +// try { +// member.getActor().forceRemoveService(psid); +// } catch (Exception e) { +// log.warn("Problem on node removal", e); +// +// throw new RuntimeException(e); +// } +// } + + throw t; + } finally { unlock(); @@ -1675,120 +1694,142 @@ * Task to send() a buffer to the follower. */ static private class SendBufferTask<S extends HAPipelineGlue> implements - Callable<Void> { + Callable<Void> { - private final QuorumMember<S> member; - private final long token; // member MUST remain leader for token. - private final IHASyncRequest req; - private final IHAWriteMessage msg; - private final ByteBuffer b; - private final PipelineState<S> downstream; - private final HASendService sendService; - private final Lock sendLock; + private final QuorumMember<S> member; + private final long token; // member MUST remain leader for token. + private final IHASyncRequest req; + private final IHAWriteMessage msg; + private final ByteBuffer b; + private final PipelineState<S> downstream; + private final HASendService sendService; + private final Lock sendLock; - public SendBufferTask(final QuorumMember<S> member, final long token, - final IHASyncRequest req, final IHAWriteMessage msg, - final ByteBuffer b, final PipelineState<S> downstream, - final HASendService sendService, final Lock sendLock) { + public SendBufferTask(final QuorumMember<S> member, final long token, + final IHASyncRequest req, final IHAWriteMessage msg, + final ByteBuffer b, final PipelineState<S> downstream, + final HASendService sendService, final Lock sendLock) { - this.member = member; - this.token = token; - this.req = req; // Note: MAY be null. - this.msg = msg; - this.b = b; - this.downstream = downstream; - this.sendService = sendService; - this.sendLock = sendLock; + this.member = member; + this.token = token; + this.req = req; // Note: MAY be null. + this.msg = msg; + this.b = b; + this.downstream = downstream; + this.sendService = sendService; + this.sendLock = sendLock; - } + } - public Void call() throws Exception { + public Void call() throws Exception { - /* - * Lock ensures that we do not have more than one request on the - * write pipeline at a time. - */ + /* + * Lock ensures that we do not have more than one request on the + * write pipeline at a time. + */ - sendLock.lock(); + sendLock.lock(); - try { + try { - doRunWithLock(); - - return null; - - } finally { - - sendLock.unlock(); - - } + doRunWithLock(); - } - - private void doRunWithLock() throws InterruptedException, - ExecutionException, IOException { + return null; - // Get Future for send() outcome on local service. - final Future<Void> futSnd = sendService.send(b, msg.getToken()); + } finally { - try { + sendLock.unlock(); - // Get Future for receive outcome on the remote service (RMI). 
- final Future<Void> futRec = downstream.service - .receiveAndReplicate(req, msg); + } - try { + } - /* - * Await the Futures, but spend more time waiting on the - * local Future and only check the remote Future every - * second. Timeouts are ignored during this loop. - */ - while (!futSnd.isDone() && !futRec.isDone()) { - /* - * Make sure leader's quorum token remains valid for ALL - * writes. - */ - member.assertLeader(token); - try { - futSnd.get(1L, TimeUnit.SECONDS); - } catch (TimeoutException ignore) { - } - try { - futRec.get(10L, TimeUnit.MILLISECONDS); - } catch (TimeoutException ignore) { - } - } - futSnd.get(); - futRec.get(); + private void doRunWithLock() throws InterruptedException, + ExecutionException, IOException { - } finally { - if (!futRec.isDone()) { - // cancel remote Future unless done. - futRec.cancel(true/* mayInterruptIfRunning */); - } - } + try { + // Get Future for send() outcome on local service. + final Future<Void> futSnd = sendService.send(b, msg.getToken()); - } catch (Throwable t) { - // check inner cause for downstream PipelineException - final PipelineException pe = (PipelineException) InnerCause.getInnerCause(t, PipelineException.class); - if (pe != null) { - throw pe; // throw it upstream - } - - // determine next pipeline service id - final UUID[] priorAndNext = member.getQuorum().getPipelinePriorAndNext(member.getServiceId()); - log.warn("Problem with downstream service: " + priorAndNext[1], t); - - throw new PipelineException(priorAndNext[1], t); - } finally { - // cancel the local Future. - futSnd.cancel(true/* mayInterruptIfRunning */); - } + try { - } - - } + // Get Future for receive outcome on the remote service + // (RMI). + final Future<Void> futRec = downstream.service + .receiveAndReplicate(req, msg); + + try { + + /* + * Await the Futures, but spend more time waiting on the + * local Future and only check the remote Future every + * second. Timeouts are ignored during this loop. + */ + while (!futSnd.isDone() && !futRec.isDone()) { + /* + * Make sure leader's quorum token remains valid for + * ALL writes. + */ + member.assertLeader(token); + try { + futSnd.get(1L, TimeUnit.SECONDS); + } catch (TimeoutException ignore) { + } + try { + futRec.get(10L, TimeUnit.MILLISECONDS); + } catch (TimeoutException ignore) { + } + } + futSnd.get(); + futRec.get(); + + } finally { + if (!futRec.isDone()) { + // cancel remote Future unless done. + futRec.cancel(true/* mayInterruptIfRunning */); + } + } + + } finally { + // cancel the local Future. 
+ futSnd.cancel(true/* mayInterruptIfRunning */); + } + + } catch (Throwable t) { + // check inner cause for downstream PipelineException + final PipelineException pe = (PipelineException) InnerCause + .getInnerCause(t, PipelineException.class); + final UUID problemService; + if (pe != null) { + // throw pe; // throw it upstream - already should have been + // handled + problemService = pe.getProblemServiceId(); + } else { + final UUID[] priorAndNext = member.getQuorum() + .getPipelinePriorAndNext(member.getServiceId()); + problemService = priorAndNext[1]; + } + + // determine next pipeline service id + log.warn("Problem with downstream service: " + problemService, + t); + + // Carry out remedial work directly - BAD + log.error("Really need to remove service " + problemService); + + try { + member.getActor().forceRemoveService(problemService); + } catch (Exception e) { + log.warn("Problem on node removal", e); + + throw new RuntimeException(e); + } + + throw new PipelineException(problemService, t); + + } + } + } /** * Lock used to ensure that at most one message is being sent along the @@ -1934,8 +1975,8 @@ final HAMessageWrapper wrappedMsg = new HAMessageWrapper( req, msg); - // Get Future for send() outcome on local service. - final Future<Void> futSnd = receiveService.receiveData(wrappedMsg, + // Get Future for receive() outcome on local service. + final Future<Void> futRcv = receiveService.receiveData(wrappedMsg, b); try { @@ -1946,7 +1987,7 @@ // Verify token remains valid. member.getQuorum().assertQuorum(token); // Await the future. - return futSnd.get(1000, TimeUnit.MILLISECONDS); + return futRcv.get(1000, TimeUnit.MILLISECONDS); } catch (TimeoutException ex) { // Timeout. Ignore and retry loop. Thread.sleep(100/* ms */); @@ -1957,7 +1998,7 @@ } finally { // cancel the local Future. - futSnd.cancel(true/*mayInterruptIfRunning*/); + futRcv.cancel(true/*mayInterruptIfRunning*/); } @@ -1995,74 +2036,76 @@ this.receiveService = receiveService; } - public Void call() throws Exception { + public Void call() throws Exception { - // wrap the messages together. - final HAMessageWrapper wrappedMsg = new HAMessageWrapper( - req, msg); + // wrap the messages together. + final HAMessageWrapper wrappedMsg = new HAMessageWrapper(req, msg); - // Get Future for send() outcome on local service. - final Future<Void> futSnd = receiveService.receiveData(wrappedMsg, - b); + // Get Future for receive() outcome on local service. + final Future<Void> futRcv = receiveService.receiveData(wrappedMsg, + b); + try { + try { - try { + // Get future for receive outcome on the remote + // service. + final Future<Void> futDRcv = downstream.service + .receiveAndReplicate(req, msg); - // Get future for receive outcome on the remote - // service. - final Future<Void> futRec = downstream.service - .receiveAndReplicate(req, msg); + try { - try { + /* + * Await the Futures, but spend more time waiting on the + * local Future and only check the remote Future every + * second. Timeouts are ignored during this loop. + */ + while (!futRcv.isDone() && !futDRcv.isDone()) { + /* + * The token must remain valid, even if this service + * is not joined with the met quorum. If fact, + * services MUST replicate writes regardless of + * whether or not they are joined with the met + * quorum, but only while there is a met quorum. 
+ */ + member.getQuorum().assertQuorum(token); + try { + futRcv.get(1L, TimeUnit.SECONDS); + } catch (TimeoutException ignore) { + } + try { + futDRcv.get(10L, TimeUnit.MILLISECONDS); + } catch (TimeoutException ignore) { + } + } + futRcv.get(); + futDRcv.get(); - /* - * Await the Futures, but spend more time - * waiting on the local Future and only check - * the remote Future every second. Timeouts are - * ignored during this loop. - */ - while (!futSnd.isDone() && !futRec.isDone()) { - /* - * The token must remain valid, even if this service is - * not joined with the met quorum. If fact, services - * MUST replicate writes regardless of whether or not - * they are joined with the met quorum, but only while - * there is a met quorum. - */ - member.getQuorum().assertQuorum(token); - try { - futSnd.get(1L, TimeUnit.SECONDS); - } catch (TimeoutException ignore) { - } - try { - futRec.get(10L, TimeUnit.MILLISECONDS); - } catch (TimeoutException ignore) { - } - } - futSnd.get(); - futRec.get(); + } finally { + if (!futDRcv.isDone()) { + // cancel remote Future unless done. + futDRcv.cancel(true/* mayInterruptIfRunning */); + } + } - } finally { - if (!futRec.isDone()) { - // cancel remote Future unless done. - futRec - .cancel(true/* mayInterruptIfRunning */); - } - } + } finally { + // Is it possible that this cancel conflicts with throwing + // the PipelineException? + // cancel the local Future. + futRcv.cancel(true/* mayInterruptIfRunning */); + } + } catch (Throwable t) { + // determine next pipeline service id + final UUID[] priorAndNext = member.getQuorum() + .getPipelinePriorAndNext(member.getServiceId()); + log.warn("Problem with downstream service: " + priorAndNext[1], + t); - } catch (Throwable t) { - // determine next pipeline service id - final UUID[] priorAndNext = member.getQuorum().getPipelinePriorAndNext(member.getServiceId()); - log.warn("Problem with downstream service: " + priorAndNext[1], t); - - throw new PipelineException(priorAndNext[1], t); - } finally { - // cancel the local Future. 
- futSnd.cancel(true/* mayInterruptIfRunning */); - } + throw new PipelineException(priorAndNext[1], t); + } - // done - return null; - } + // done + return null; + } } Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -27,14 +27,18 @@ import java.io.IOException; import java.net.BindException; import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; import java.nio.ByteBuffer; import java.nio.channels.AsynchronousCloseException; +import java.nio.channels.ClosedChannelException; import java.nio.channels.SelectionKey; import java.nio.channels.Selector; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.Iterator; import java.util.Set; +import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; @@ -52,6 +56,7 @@ import org.apache.log4j.Logger; import com.bigdata.btree.BytesUtil; +import com.bigdata.ha.PipelineException; import com.bigdata.ha.QuorumPipelineImpl; import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; @@ -962,8 +967,35 @@ boolean success = false; try { - doReceiveAndReplicate(client); - success = true; + while (!success) { + try { + log.warn("Receiving"); + doReceiveAndReplicate(client); + log.warn("DONE"); + success = true; + } catch (ClosedChannelException cce) { // closed then re-open + + final ServerSocket socket = server.socket(); + + log.warn("Closed upstream? " + socket.getChannel().isOpen(), cce); + + socket.bind(socket.getLocalSocketAddress()); + + server.socket().getChannel().isOpen(); + + awaitAccept(); + + log.warn("Creating new client"); + + client = new Client(server);//, sendService, addrNext); + + // save off reference and round we go + clientRef.set(client); + } catch (Throwable t) { + log.warn("Unexpected Error", t); + throw new RuntimeException(t); + } + } // success. return null; } finally { @@ -1110,7 +1142,8 @@ final int rdLen = client.client.read(tokenBB); for (int i = 0; i < rdLen; i++) { if (tokenBuffer[i] != token[tokenIndex]) { - log.warn("TOKEN MISMATCH"); + if (ntokenreads < 2) + log.warn("TOKEN MISMATCH"); tokenIndex = 0; if (tokenBuffer[i] == token[tokenIndex]) { tokenIndex++; @@ -1222,7 +1255,7 @@ } catch(Throwable t) { log.warn("Send downstream error", t); - throw new RuntimeException(t); + throw new RuntimeException(t); } } break; Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -509,7 +509,7 @@ while (nwritten < remaining) { - log.warn("TOKEN: " + BytesUtil.toHexString(token) + ", written: " + (token == null ? false : ntoken == token.length)); + // log.warn("TOKEN: " + BytesUtil.toHexString(token) + ", written: " + (token == null ? 
false : ntoken == token.length)); if (token != null && ntoken < token.length) { final ByteBuffer tokenBB = ByteBuffer.wrap(token); tokenBB.position(ntoken); @@ -551,10 +551,26 @@ * buffer. */ - final int nbytes = socketChannel.write(data); + final int nbytes; + if (log.isDebugEnabled()) { // add debug latency + final int limit = data.limit(); + if (data.position() < (limit-50000)) { + data.limit(data.position()+50000); + } + nbytes = socketChannel.write(data); + data.limit(limit); + + nwritten += nbytes; + log.debug("Written " + nwritten + " of total " + data.limit()); + + if (nwritten < limit) { + Thread.sleep(2); + } + } else { + nbytes = socketChannel.write(data); + nwritten += nbytes; + } - nwritten += nbytes; - if (log.isTraceEnabled()) log.trace("Sent " + nbytes + " bytes with " + nwritten + " of out " + remaining + " written so far"); @@ -729,4 +745,21 @@ } + public void resetSocket() { + try { + final SocketChannel socketChannel = this.socketChannel.get(); + if (socketChannel != null) { + try { + socketChannel.close(); + } catch (IOException ex) { + log.error("Ignoring exception during reetSocket: " + ex, ex); + } finally { + this.socketChannel.set(null); + } + } + } finally { + reopenChannel(); + } + } + } Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -39,6 +39,7 @@ import org.apache.log4j.Logger; import com.bigdata.io.FileChannelUtility; +import com.bigdata.journal.jini.ha.HAJournalTest.StoreState; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.rawstore.AbstractRawWormStore; import com.bigdata.rawstore.Bytes; @@ -687,4 +688,14 @@ // public boolean isFlushed() { // return true; // } + + + /** + * Default StoreState implementation to be overridden + * as appropriate. + */ + synchronized public StoreState getStoreState() { + return new StoreState(); + } + } Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -3380,7 +3380,9 @@ metaStartAddr, metaBitsAddr, old.getStoreType(), old.getCreateTime(), old.getCloseTime(), old.getVersion(), store.checker); - + + + log.warn("CommitRecordIndexAddr: " + commitRecordIndexAddr + ", strategy: " + _bufferStrategy.getClass() + ", physicalAddress: " + _bufferStrategy.getPhysicalAddress(commitRecordIndexAddr)); } /** @@ -3545,7 +3547,18 @@ private void commitHA() { try { + + if (log.isDebugEnabled()) { + final long rootAddr = store._commitRecordIndex.getRootAddr(); + log.debug("CommitRecordIndex RootAddr: " + rootAddr + ", physical address: " + store.getPhysicalAddress(rootAddr)); + + if (_bufferStrategy instanceof IRWStrategy) { + final RWStore rwstore = ((RWStrategy) _bufferStrategy).getStore(); + log.debug(rwstore.showAllocatorList()); + } + } + if(!prepare2Phase()) { // PREPARE rejected. @@ -4623,6 +4636,8 @@ * * Note: For this code path we DO NOT cache the index view. 
*/ + if (log.isDebugEnabled()) + log.debug("reading CommitRecordIndex from PhysicalAddress: " + _bufferStrategy.getPhysicalAddress(addr)); ndx = (CommitRecordIndex) BTree.load(this, addr, false/* readOnly */); @@ -7589,6 +7604,9 @@ } + if (log.isDebugEnabled()) + log.debug("RBV with CommitRecordIndex at PhysicalAddress: " + _bufferStrategy.getPhysicalAddress(rootBlock.getCommitRecordIndexAddr())); + } // doInnerRun() } // Commit2PhaseTask Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/BasicBufferStrategy.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/BasicBufferStrategy.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/BasicBufferStrategy.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -30,6 +30,7 @@ import com.bigdata.counters.CounterSet; import com.bigdata.io.FileChannelUtility; +import com.bigdata.journal.jini.ha.HAJournalTest.StoreState; /** * Implements logic to read from and write on a buffer. This is sufficient Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/DumpJournal.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/DumpJournal.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/DumpJournal.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -381,6 +381,17 @@ final boolean dumpHistory, final boolean dumpPages, final boolean dumpIndices, final boolean showTuples) { + /** + * Start a transaction. This will bracket all index access and protect + * the data on the journal from concurrent recycling. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/762"> + * DumpJournal does not protect against concurrent updates (NSS) + * </a> + */ + final long tx = journal.newTx(ITx.READ_COMMITTED); + try { + final FileMetadata fmd = journal.getFileMetadata(); if (fmd != null) { @@ -600,6 +611,9 @@ dumpPages, dumpIndices, showTuples); } + } finally { + journal.abort(tx); + } } @@ -614,7 +628,7 @@ } - public void dumpGlobalRowStore(final PrintWriter out) { + private void dumpGlobalRowStore(final PrintWriter out) { final SparseRowStore grs = journal.getGlobalRowStore(journal .getLastCommitTime()); @@ -826,7 +840,7 @@ * * @return */ - public String dumpRawRecord(final long addr) { + private String dumpRawRecord(final long addr) { if (journal.getBufferStrategy() instanceof IRWStrategy) { /** @@ -984,6 +998,7 @@ } } case Stream: + @SuppressWarnings("unused") final Stream stream = (Stream) ndx; /* * Note: We can't do anything here with a Stream, but we do @@ -1004,41 +1019,4 @@ } - /** - * Return the data in the buffer. - */ - public static byte[] getBytes(ByteBuffer buf) { - - if (buf.hasArray() && buf.arrayOffset() == 0 && buf.position() == 0 - && buf.limit() == buf.capacity()) { - - /* - * Return the backing array. - */ - - return buf.array(); - - } - - /* - * Copy the expected data into a byte[] using a read-only view on the - * buffer so that we do not mess with its position, mark, or limit. 
- */ - final byte[] a; - { - - buf = buf.asReadOnlyBuffer(); - - final int len = buf.remaining(); - - a = new byte[len]; - - buf.get(a); - - } - - return a; - - } - } Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -28,6 +28,7 @@ import java.nio.ByteBuffer; import com.bigdata.counters.CounterSet; +import com.bigdata.journal.jini.ha.HAJournalTest.StoreState; import com.bigdata.rawstore.IAddressManager; import com.bigdata.rawstore.IMRMW; import com.bigdata.rawstore.IRawStore; @@ -276,6 +277,15 @@ */ public boolean useChecksums(); + /** + * A StoreState object references critical transient data that can be used + * to determine a degree of consistency between stores, specifically for an + * HA context. + * + * @return the StoreState + */ + public StoreState getStoreState(); + // /** // * Determines whether there are outstanding writes to the underlying store // */ Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -49,6 +49,7 @@ import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.IBufferAccess; import com.bigdata.io.writecache.WriteCacheService; +import com.bigdata.journal.jini.ha.HAJournalTest.StoreState; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.quorum.Quorum; import com.bigdata.quorum.QuorumException; @@ -905,6 +906,11 @@ public WriteCacheService getWriteCacheService() { return m_store.getWriteCacheService(); } + + @Override + public StoreState getStoreState() { + return m_store.getStoreState(); + } // @Override // public boolean isFlushed() { Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -178,7 +178,9 @@ "Address committed but not set in transients"); } - m_store.showWriteCacheDebug(paddr); + m_store.showWriteCacheDebug(paddr); + + log.warn("Physical address " + paddr + " not accessible for Allocator of size " + m_size); return 0L; } Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -101,6 +101,7 @@ import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.RootBlockView; import com.bigdata.journal.StoreTypeEnum; +import com.bigdata.journal.jini.ha.HAJournalTest.StoreState; import com.bigdata.quorum.Quorum; import com.bigdata.quorum.QuorumException; import 
com.bigdata.rawstore.IAllocationContext; @@ -1222,9 +1223,9 @@ /* * Utility to encapsulate RootBlock interpreation */ - static class RootBlockInfo { + class RootBlockInfo { - static int nextAllocation(final IRootBlockView rb) { + int nextAllocation(final IRootBlockView rb) { final long nxtOffset = rb.getNextOffset(); // next allocation to be made (in -32K units). @@ -1237,13 +1238,11 @@ return ret == 0 ? -(1 + META_ALLOCATION) : ret; } - /* - * Meta-Allocations stored as {int address; int[8] bits}, so each block - * holds 8*32=256 allocation slots of 1K totaling 256K. - * - * The returned int array is a flattened list of these int[9] blocks - */ - static int[] metabits(final IRootBlockView rb, final ReopenFileChannel reopener) throws IOException { + final int[] m_metabits; + final long m_storageStatsAddr; + final long m_lastDeferredReleaseTime; + + RootBlockInfo(final IRootBlockView rb) throws IOException { final long rawmbaddr = rb.getMetaBitsAddr(); /* @@ -1265,17 +1264,17 @@ */ final byte[] buf = new byte[metaBitsStore * 4]; - FileChannelUtility.readAll(reopener, ByteBuffer.wrap(buf), pmaddr); + FileChannelUtility.readAll(m_reopener, ByteBuffer.wrap(buf), pmaddr); final DataInputStream strBuf = new DataInputStream(new ByteArrayInputStream(buf)); // Can handle minor store version incompatibility strBuf.readInt(); // STORE VERSION - strBuf.readLong(); // Last Deferred Release Time + m_lastDeferredReleaseTime = strBuf.readLong(); // Last Deferred Release Time strBuf.readInt(); // cDefaultMetaBitsSize final int allocBlocks = strBuf.readInt(); - strBuf.readLong(); // m_storageStatsAddr + m_storageStatsAddr = strBuf.readLong(); // m_storageStatsAddr // step over those reserved ints for (int i = 0; i < cReservedMetaBits; i++) { @@ -1300,8 +1299,15 @@ * Meta-Allocations stored as {int address; int[8] bits}, so each block * holds 8*32=256 allocation slots of 1K totaling 256K. */ - return ret; + m_metabits = ret; } + + /* + * Meta-Allocations stored as {int address; int[8] bits}, so each block + * holds 8*32=256 allocation slots of 1K totaling 256K. + * + * The returned int array is a flattened list of these int[9] blocks + */ } /** @@ -1451,7 +1457,9 @@ for (int i = 0; i < m_metaBitsSize; i++) { m_metaBits[i] = strBuf.readInt(); } - m_metaTransientBits = (int[]) m_metaBits.clone(); + // m_metaTransientBits = (int[]) m_metaBits.clone(); + + syncMetaTransients(); final int numFixed = m_allocSizes.length; @@ -1478,6 +1486,18 @@ + ", " + m_metaBitsAddr); } } + + /** + * Uses System.arraycopy rather than clone() to duplicate the + * metaBits to the metaTransientBits, which will be faster. 
+ */ + private void syncMetaTransients() { + if (m_metaTransientBits == null || m_metaTransientBits.length != m_metaBits.length) { + m_metaTransientBits = (int[]) m_metaBits.clone(); + } else { + System.arraycopy(m_metaBits, 0, m_metaTransientBits, 0, m_metaTransientBits.length); + } + } // /* // * Called when store is opened to make sure any deferred frees are @@ -2842,6 +2862,11 @@ isolatedWrites = isolatedWrites || fa.reset(m_writeCacheService, m_committedNextAllocation); } + /** + * Now clone the transient metabits for protection if this service becomes leader + */ + syncMetaTransients(); + if (!isolatedWrites) { /** * Now we should be able to unwind any unused allocators and unused @@ -3114,7 +3139,7 @@ // to provide control // writeFileSpec(); - m_metaTransientBits = (int[]) m_metaBits.clone(); + syncMetaTransients(); // Must be called from AbstractJournal commitNow after writeRootBlock // postCommit(); @@ -3500,6 +3525,9 @@ (b * cDefaultMetaBitsSize) + 1, cDefaultMetaBitsSize-1); if (ret != -1) { + // The assumption is that this bit is also NOT set in m_metaBits + assert !tstBit(m_metaBits, ret); + return ret; } } @@ -6194,14 +6222,40 @@ log.trace("Allocator " + index + ", size: " + xfa.m_size + ", startAddress: " + xfa.getStartAddr() + ", allocated: " + (xfa.getAllocatedSlots()/xfa.m_size)); } } - + + // Update m_metaBits addr and m_nextAllocation to ensure able to allocate as well as read! + { + final long nxtOffset = rbv.getNextOffset(); + + // next allocation to be made (in -32K units). + m_nextAllocation = -(int) (nxtOffset >> 32); + + if (m_nextAllocation == 0) { + throw new IllegalStateException("Invalid state for non-empty store"); + } + + m_committedNextAllocation = m_nextAllocation; + + final long savedMetaBitsAddr = m_metaBitsAddr; + // latched offset of the metabits region. + m_metaBitsAddr = -(int) nxtOffset; + + if (savedMetaBitsAddr != m_metaBitsAddr) + log.warn("Old metaBitsAddr: " + savedMetaBitsAddr + ", new metaBitsAddr: " + m_metaBitsAddr); + } + final ArrayList<FixedAllocator> nallocs = new ArrayList<FixedAllocator>(); // current metabits final int[] oldmetabits = m_metaBits; // new metabits - m_metaBits = RootBlockInfo.metabits(rbv, m_reopener); + final RootBlockInfo rbi = new RootBlockInfo(rbv); + m_metaBits = rbi.m_metabits; + // and grab the last deferred release and storageStats! 
+ m_lastDeferredReleaseTime = rbi.m_lastDeferredReleaseTime; + m_storageStatsAddr = rbi.m_storageStatsAddr; + if(log.isTraceEnabled()) log.trace("Metabits length: " + m_metaBits.length); @@ -6903,6 +6957,16 @@ } + public String showAllocatorList() { + final StringBuilder sb = new StringBuilder(); + + for (int index = 0; index < m_allocs.size(); index++) { + final FixedAllocator xfa = m_allocs.get(index); + sb.append("Allocator " + index + ", size: " + xfa.m_size + ", startAddress: " + xfa.getStartAddr() + ", allocated: " + xfa.getAllocatedSlots() + "\n"); + } + + return sb.toString(); + } // /** // * // * @return whether WCS is flushed @@ -6913,6 +6977,75 @@ // return this.m_writeCacheService.isFlushed(); // } + public static class RWStoreState extends StoreState { + + /** + * Generated ID + */ + private static final long serialVersionUID = 4315400143557397323L; + + /* + * Transient state necessary for consistent ha leader transition + */ + int m_fileSize; + int m_nextAllocation; + int m_committedNextAllocation; + long m_minReleaseAge; + long m_lastDeferredReleaseTime; + long m_storageStatsAddr; + int m_allocsSize; + int m_metaBitsAddr; + int m_metaBitsSize; + + public boolean equals(final Object obj) { + if (obj == null || !(obj instanceof RWStoreState)) + return false; + final RWStoreState other = (RWStoreState) obj; + return m_fileSize == other.m_fileSize + && m_nextAllocation == other.m_nextAllocation + && m_committedNextAllocation == other.m_committedNextAllocation + && m_minReleaseAge == other.m_minReleaseAge + && m_lastDeferredReleaseTime == other.m_lastDeferredReleaseTime + && m_storageStatsAddr == other.m_storageStatsAddr + && m_allocsSize == other.m_allocsSize + && m_metaBitsAddr == other.m_metaBitsAddr + && m_metaBitsSize == other.m_metaBitsSize; + } + + public String toString() { + final StringBuilder sb = new StringBuilder(); + + sb.append("RWStoreState\n"); + sb.append("fileSize: " + m_fileSize + "\n"); + sb.append("nextAllocation: " + m_nextAllocation + "\n"); + sb.append("committedNextAllocation: " + m_committedNextAllocation + "\n"); + sb.append("minReleaseAge: " + m_minReleaseAge + "\n"); + sb.append("lastDeferredReleaseTime: " + m_lastDeferredReleaseTime + "\n"); + sb.append("storageStatsAddr: " + m_storageStatsAddr + "\n"); + sb.append("allocsSize: " + m_allocsSize + "\n"); + sb.append("metaBitsAddr: " + m_metaBitsAddr + "\n"); + sb.append("metaBitsSize: " + m_metaBitsSize + "\n"); + + return sb.toString(); + } + } + + public StoreState getStoreState() { + final RWStoreState ret = new RWStoreState(); + + ret.m_fileSize = m_fileSize; + ret.m_nextAllocation = m_nextAllocation; + ret.m_committedNextAllocation = m_committedNextAllocation; + ret.m_minReleaseAge = m_minReleaseAge; + ret.m_lastDeferredReleaseTime = m_lastDeferredReleaseTime; + ret.m_storageStatsAddr = m_storageStatsAddr; + ret.m_allocsSize = m_allocs.size(); + ret.m_metaBitsAddr = m_metaBitsAddr; + ret.m_metaBitsSize = m_metaBits.length; + + return ret; + } + // public void prepareForRebuild(final HARebuildRequest req) { // assert m_rebuildRequest == null; // Modified: branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/sector/MemStrategy.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/sector/MemStrategy.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/java/com/bigdata/rwstore/sector/MemStrategy.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -41,6 +41,7 @@ import 
com.bigdata.journal.IRootBlockView; import com.bigdata.journal.RootBlockView; import com.bigdata.journal.StoreTypeEnum; +import com.bigdata.journal.jini.ha.HAJournalTest.StoreState; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.rawstore.IAddressManager; import com.bigdata.rawstore.IAllocationContext; @@ -497,6 +498,11 @@ return m_dirty; } + @Override + public StoreState getStoreState() { + throw new UnsupportedOperationException(); + } + // @Override // public boolean isFlushed() { // return true; Modified: branches/PIPELINE_RESYNC/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -25,14 +25,25 @@ package com.bigdata.ha.pipeline; import java.io.IOException; +import java.io.InputStream; import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.util.Random; +import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; +import com.bigdata.btree.BytesUtil; import com.bigdata.ha.msg.HAWriteMessageBase; import com.bigdata.ha.msg.IHAWriteMessageBase; import com.bigdata.io.DirectBufferPool; @@ -169,22 +180,29 @@ public void testSimpleExchange() throws InterruptedException, ExecutionException, TimeoutException { - final long timeout = 5000; // ms + doSimpleExchange(); + } + + private void doSimpleExchange() throws InterruptedException, + ExecutionException, TimeoutException { + + final long timeout = 5000; // ms final ByteBuffer tst1 = getRandomData(50); - final IHAWriteMessageBase msg1 = new HAWriteMessageBase(50, chk.checksum(tst1)); + final IHAWriteMessageBase msg1 = new HAWriteMessageBase(50, + chk.checksum(tst1)); final ByteBuffer rcv1 = ByteBuffer.allocate(2000); final ByteBuffer rcv2 = ByteBuffer.allocate(2000); // rcv.limit(50); final Future<Void> futRec1 = receiveServiceB.receiveData(msg1, rcv1); final Future<Void> futRec2 = receiveServiceC.receiveData(msg1, rcv2); final Future<Void> futSnd = sendServiceA.send(tst1, msg1.getToken()); - futSnd.get(timeout,TimeUnit.MILLISECONDS); - futRec1.get(timeout,TimeUnit.MILLISECONDS); - futRec2.get(timeout,TimeUnit.MILLISECONDS); + futSnd.get(timeout, TimeUnit.MILLISECONDS); + futRec1.get(timeout, TimeUnit.MILLISECONDS); + futRec2.get(timeout, TimeUnit.MILLISECONDS); assertEquals(tst1, rcv1); assertEquals(rcv1, rcv2); } - + public void testChecksumError() throws InterruptedException, ExecutionException { @@ -711,4 +729,139 @@ } } + public void testSimpleReset() throws InterruptedException, + ExecutionException, TimeoutException { + + doSimpleExchange(); + + sendServiceA.resetSocket(); + + doSimpleExchange(); + } + + /** + * The use of threaded tasks in the send/receive service makes it difficult to + * observer the socket state changes. 
+ * + * So let's begin by writing some tests over the raw sockets. + * + * @throws IOException + */ + public void testDirectSockets() throws IOException { + final InetSocketAddress serverAddr = new InetSocketAddress(getPort(0)); + + final ServerSocket ss = new ServerSocket(); + ss.bind(serverAddr); + + final SocketChannel cs = SocketChannel.open(); + + cs.connect(serverAddr); + + final Random r = new Random(); + final byte[] data = new byte[200]; + r.nextBytes(data); + + final ByteBuffer src = ByteBuffer.wrap(data); + + cs.write(src); + + final byte[] dst = new byte[200]; + + final Socket readSckt1 = ss.accept(); + + InputStream instr = readSckt1.getInputStream(); + + instr.read(dst); + + assertTrue(BytesUtil.bytesEqual(data, dst)); + + // now write some more data into the channel and then close it + cs.write(ByteBuffer.wrap(data)); + + // close the client socket + cs.close(); + + // and see what happens when we try to accept the data + // we expect it to hang and timeout! + assertTimout(1, TimeUnit.SECONDS, new Callable<Void>() { + + @Override + public Void call() throws Exception { + ss.accept(); + + return null; + }}); + + // Now try writing some more data + try { + cs.write(ByteBuffer.wrap(data)); + fail("Expected closed channel exception"); + } catch (ClosedChannelException e) { + // expected + } + + // the old stream should be closed + try { + final int rdlen = instr.read(dst); // should be closed + fail("Expected closed socket exception, rdlen: " + rdlen); + } catch (Exception e) { + // expected; + } + + // if so then should we explicitly close its socket? + readSckt1.close(); + + final SocketChannel cs2 = SocketChannel.open(); + cs2.connect(serverAddr); + cs2.write(ByteBuffer.wrap(data)); + + // Now we should be able to accept the new write + final AtomicReference<Socket> av = new AtomicReference<Socket>(); + assertNoTimout(1, TimeUnit.SECONDS, new Callable<Void>() { + + @Override + public Void call() throws Exception { + av.set(ss.accept()); + + return null; + }}); + + // the new socket and associated stream should be good to go + av.get().getInputStream().read(dst); + + assertTrue(BytesUtil.bytesEqual(data, dst)); + + + } + + private void assertTimout(long timeout, TimeUnit unit, Callable<Void> callable) { + final ExecutorService es = Executors.newSingleThreadExecutor(); + try { + final Future<Void> ret = es.submit(callable); + ret.get(timeout, unit); + fail("Expected timeout"); + } catch (TimeoutException e) { + // that is expected + return; + } catch (Exception e) { + fail("Expected timeout"); + } finally { + es.shutdown(); + } + } + + private void assertNoTimout(long timeout, TimeUnit unit, Callable<Void> callable) { + final ExecutorService es = Executors.newSingleThreadExecutor(); + try { + final Future<Void> ret = es.submit(callable); + ret.get(timeout, unit); + } catch (TimeoutException e) { + fail("Unexpected timeout"); + } catch (Exception e) { + fail("Unexpected Exception", e); + } finally { + es.shutdown(); + } + } + } Modified: branches/PIPELINE_RESYNC/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java =================================================================== --- branches/PIPELINE_RESYNC/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java 2013-11-26 18:30:22 UTC (rev 7598) +++ branches/PIPELINE_RESYNC/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java 2013-11-27 14:23:29 UTC (rev 7599) @@ -29,15 +29,23 @@ package com.bigdata.journal; import java.io.IOException; +import java.util.LinkedList; +import java.util.List; import java.util.UUID; 
+import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; +import java.util.concurrent.FutureTask; +import java.util.concurrent.TimeUnit; import com.bigdata.btree.AbstractBTreeTestCase; import com.bigdata.btree.BTree; import com.bigdata.btree.HTreeIndexMetadata; import com.bigdata.btree.IndexMetadata; import com.bigdata.btree.keys.KV; +import com.bigdata.concurrent.FutureTaskMon; import com.bigdata.htree.HTree; +import com.bigdata.rwstore.IRWStrategy; +import com.bigdata.util.concurrent.LatchedExecutor; /** * Test suite for {@link DumpJournal}. @@ -66,8 +74,10 @@ /** * @param name */ - public TestDumpJournal(String name) { + public TestDumpJournal(final String name) { + super(name); + } /** @@ -361,4 +371,229 @@ } + /** + * Unit test for {@link DumpJournal} with concurrent updates against the + * backing store. This is intended primarily to detect failures to protect + * against the recycling model associated with the {@link IRWStrategy}. + * + * @throws Exception + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/762"> + * DumpJournal does not protect against concurrent updates (NSS) </a> + */ + public void test_dumpJournal_concurrent_updates() throws Exception { + + final String PREFIX = "testIndex#"; + final int NUM_INDICES = 4; + + Journal src = getStore(getProperties()); + + try { + + for (int i = 0; i < NUM_INDICES; i++) { + + // register an index + final String name = PREFIX + i; + + src.registerIndex(new IndexMetadata(name, UUID.randomUUID())); + { + + // lookup the index. + final BTree ndx = src.getIndex(name); + + // #of tuples to write. + final int ntuples = r.nextInt(1000); + + // generate random data. + final KV[] a = AbstractBTreeTestCase + .getRandomKeyValues(ntuples); + + // write tuples (in random order) + for (KV kv : a) { + + ndx.insert(kv.key, kv.val); + + if (r.nextInt(100) < 10) { + + // randomly increment the counter (10% of the time). + ndx.getCounter().incrementAndGet(); + + } + + } + + } + + } + + // commit the journal (!) + src.commit(); + + /** + * Task to run various DumpJournal requests. + */ + final class DumpTask implements Callable<Void> { + + private final Journal src; + + public DumpTask(final Journal src) { + + this.src = src; + + } + + public Void call() throws Exception { + + new DumpJournal(src) + .dumpJournal(false/* dumpHistory */, + true/* dumpPages */, + false/* dumpIndices */, false/* showTuples */); + + new DumpJournal(src) + .dumpJournal(true/* dumpHistory */, + true/* dumpPages */, true/* dumpIndices */, + false/* showTuples */); + + // test again w/o dumpPages + new DumpJournal(src) + .dumpJournal(true/* dumpHistory */, + false/* dumpPages */, + true/* dumpIndices */, false/* showTuples */); + + return (Void) null; + + } + + } + + final class UpdateTask implements Callable<Void> { + + private final Journal src; + + public UpdateTask(final Journal src) { + + this.src = src; + + } + + public Void call() throws Exception { + + /* + * Now write some more data, going through a series of commit + * points. This let's us check access to historical commit points. + */ + for (int j = 0; j < 10; j++) { + + for (int i = 0; i < NUM_INDICES; i++) { + + // register an index + final String name = PREFIX + i; + + // lookup the index. + final BTree ndx = src.getIndex(name); + + // #of tuples to write. + final int ntuples = r.nextInt(1000); + + // generate random data. 
+ final KV[] a = AbstractBTreeTestCase + .getRandomKeyValues(ntuples); + + // write tuples (in random order) + for (KV kv : a) { + + ndx.insert(kv.key, kv.val); + + if (r.nextInt(100) < 10) { + + // randomly increment the counter (10% of the time). + ... [truncated message content] |
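The getStoreState() plumbing in the diff above (IBufferStrategy.getStoreState(), RWStrategy delegating to RWStore.getStoreState(), and the RWStoreState value object with its field-by-field equals()/toString()) is there so that two services can be checked for agreement on their transient allocator state, for example after a leader change. A minimal sketch of such a check, assuming only what the diff shows; the helper name, the logger, and the choice of which two strategies to compare are illustrative, and the comparison is only meaningful when both journals are RWStore-backed so that RWStoreState.equals() applies:

import org.apache.log4j.Logger;

import com.bigdata.journal.IBufferStrategy;
import com.bigdata.journal.jini.ha.HAJournalTest.StoreState;

public class StoreStateCheck {

    private static final Logger log = Logger.getLogger(StoreStateCheck.class);

    /**
     * Compare the transient store state of two services (e.g. the quorum
     * leader and a follower). RWStoreState.equals() compares fileSize,
     * nextAllocation, committedNextAllocation, metaBitsAddr, etc., so a
     * mismatch indicates that the allocator bookkeeping has diverged.
     */
    public static boolean sameStoreState(final IBufferStrategy expected,
            final IBufferStrategy actual) {

        final StoreState a = expected.getStoreState();
        final StoreState b = actual.getStoreState();

        if (!a.equals(b)) {

            // toString() renders the individual fields for diagnosis.
            log.error("Store states differ:\nexpected:\n" + a + "actual:\n" + b);

            return false;

        }

        return true;

    }

}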
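The DumpJournal changes in the same diff bracket the dump with a read-only transaction so that the RWStore cannot recycle the commit point being inspected while the dump runs (see ticket 762 cited in the diff). The same bracketing applies to any ad hoc read against a live Journal. A minimal sketch using only the calls visible in the diff (newTx(ITx.READ_COMMITTED) and abort(tx)); the Runnable stands in for whatever read-only work needs the protection:

import com.bigdata.journal.ITx;
import com.bigdata.journal.Journal;

public class ReadLockBracket {

    /**
     * Bracket read-only access to the journal with a read-only transaction.
     * While the tx is open the backing store may not recycle the commit
     * point being read, so concurrent commits cannot invalidate the
     * addresses visited by the reader.
     */
    public static void doProtectedRead(final Journal journal,
            final Runnable readOnlyWork) {

        // Read-only tx against the most recent commit point.
        final long tx = journal.newTx(ITx.READ_COMMITTED);

        try {

            // All index and commit record access happens inside the
            // bracket (in DumpJournal this is the body of dumpJournal()).
            readOnlyWork.run();

        } finally {

            // abort() closes a read-only tx and releases the read lock
            // taken by newTx().
            journal.abort(tx);

        }

    }

}

In the diff this bracketing is pushed inside DumpJournal.dumpJournal() itself, so existing callers pick up the protection without any change.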
From: <tho...@us...> - 2013-11-26 18:30:28
Revision: 7598 http://bigdata.svn.sourceforge.net/bigdata/?rev=7598&view=rev Author: thompsonbry Date: 2013-11-26 18:30:22 +0000 (Tue, 26 Nov 2013) Log Message: ----------- Branch for #566 and #753. Added Paths: ----------- branches/NSS_GROUP_COMMIT/ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-26 18:21:13
Revision: 7597 http://bigdata.svn.sourceforge.net/bigdata/?rev=7597&view=rev Author: thompsonbry Date: 2013-11-26 18:21:06 +0000 (Tue, 26 Nov 2013) Log Message: ----------- Checkpoint on #763 (analytic query mode stochastic errors). I have not yet identified the root cause. These edits simplify some code paths but have no semantics change and do not fix the problem. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java 2013-11-26 14:21:05 UTC (rev 7596) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java 2013-11-26 18:21:06 UTC (rev 7597) @@ -349,6 +349,7 @@ * Human readable representation of the {@link IHashJoinUtility} metadata * (but not the solutions themselves). */ + @Override public String toString() { final StringBuilder sb = new StringBuilder(); @@ -417,30 +418,35 @@ } + @Override public JoinTypeEnum getJoinType() { return joinType; } - + + @Override public IVariable<?> getAskVar() { return askVar; } - + + @Override public IVariable<?>[] getJoinVars() { return joinVars; } - + + @Override public IVariable<?>[] getSelectVars() { return selectVars; } - + + @Override public IConstraint[] getConstraints() { return constraints; @@ -1114,8 +1120,8 @@ nJoinsConsidered.increment(); - if (noJoinVars&& - nJoinsConsidered.get() == noJoinVarsLimit) { + if (noJoinVars + && nJoinsConsidered.get() == noJoinVarsLimit) { if (nleftConsidered.get() > 1 && nrightConsidered.get() > 1) { @@ -1354,7 +1360,7 @@ * * Note: EXISTS depends on this to have the correct cardinality. If * EXISTS allows duplicate solutions into the join set then having - * multiple left solutions which satisify the EXISTS filter will + * multiple left solutions which satisfy the EXISTS filter will * cause multiple copies of the right solution to be output! If you * change the joinSet to allow duplicates, then it MUST NOT allow * them for EXISTS! @@ -1424,8 +1430,15 @@ final ITuple<?> t = sitr.next(); - IBindingSet rightSolution = decodeSolution(t); + final ByteArrayBuffer tb = t.getValueBuffer(); + /* + * Note: This MUST be treated as effectively immutable since we + * may have to output multiple solutions for each rightSolution. + * Those output solutions MUST NOT side-effect [rightSolutions]. + */ + final IBindingSet rightSolution = decodeSolution(t); + // The hash code is based on the entire solution for the // joinSet. final int hashCode = rightSolution.hashCode(); @@ -1442,8 +1455,6 @@ final ITuple<?> xt = jitr.next(); - final ByteArrayBuffer tb = t.getValueBuffer(); - final ByteArrayBuffer xb = xt.getValueBuffer(); if (0 == BytesUtil.compareBytesWithLenAndOffset( @@ -1466,25 +1477,27 @@ * it as an optional solution. */ - if (selectVars != null) {// && selectVars.length > 0) { + IBindingSet bs = rightSolution; - // Only output the projected variables. - rightSolution = rightSolution.copy(selectVars); + if (selectVars != null) { + // Drop variables which are not projected. 
+ bs = bs.copy(selectVars); + } - encoder.resolveCachedValues(rightSolution); + encoder.resolveCachedValues(bs); if (f != null) { - if (selectVars == null) - rightSolution = rightSolution.clone(); - - rightSolution.set( askVar, f); + if (bs == rightSolution) + bs = rightSolution.clone(); + bs.set(askVar, f); + } - outputBuffer.add(rightSolution); + outputBuffer.add(bs); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
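The central point of the HTreeHashJoinUtility change above is that a solution decoded from a tuple of the hash index must be treated as effectively immutable: when it has to be projected or annotated with the ASK variable before being output, that must happen on a copy, otherwise the buffered right solutions are side-effected and the EXISTS cardinality guarantee described in the comments breaks. A minimal sketch of that copy-before-mutate pattern using the IBindingSet operations that appear in the diff (copy(selectVars), clone(), set(askVar, ...)); typing the ASK value as an IConstant is an assumption, and the helper name is illustrative:

import com.bigdata.bop.IBindingSet;
import com.bigdata.bop.IConstant;
import com.bigdata.bop.IVariable;

public class OutputSolutionHelper {

    /**
     * Prepare a solution decoded from the hash index for output. The
     * decoded solution backs the index and must not be modified, so any
     * projection or ASK-var annotation is applied to a copy.
     */
    static IBindingSet prepareForOutput(final IBindingSet rightSolution,
            final IVariable<?>[] selectVars, // null if not projecting
            final IVariable<?> askVar, // null unless EXISTS/NOT EXISTS
            final IConstant<?> askVal) {

        IBindingSet bs = rightSolution;

        if (selectVars != null) {
            // copy() produces a new binding set restricted to the
            // projected variables; the original is left untouched.
            bs = bs.copy(selectVars);
        }

        if (askVar != null) {
            if (bs == rightSolution) {
                // Still aliased to the index's solution: clone before
                // mutating.
                bs = rightSolution.clone();
            }
            bs.set(askVar, askVal);
        }

        return bs;

    }

}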
From: <tho...@us...> - 2013-11-26 14:21:12
Revision: 7596 http://bigdata.svn.sourceforge.net/bigdata/?rev=7596&view=rev Author: thompsonbry Date: 2013-11-26 14:21:05 +0000 (Tue, 26 Nov 2013) Log Message: ----------- added utility methods to apply IRDFParserOptions to an RDFParser Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/RDFParserOptions.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/RDFParserOptions.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/RDFParserOptions.java 2013-11-26 14:17:48 UTC (rev 7595) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/RDFParserOptions.java 2013-11-26 14:21:05 UTC (rev 7596) @@ -256,6 +256,21 @@ p.setVerifyData(verifyData); } + /** + * Utility method applies the options to the {@link RDFParser}. + * + * @param opts + * The options. + * @param p + * The parser. + */ + public static void apply(final IRDFParserOptions opts, final RDFParser p) { + p.setDatatypeHandling(opts.getDatatypeHandling()); + p.setPreserveBNodeIDs(opts.getPreserveBNodeIDs()); + p.setStopAtFirstError(opts.getStopAtFirstError()); + p.setVerifyData(opts.getVerifyData()); + } + @Override public boolean equals(final Object o) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
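A small usage sketch for the new static apply() helper above; it replaces repeating the four individual setter calls wherever a parser is configured. The Rio.createParser() and StatementCollector calls are standard Sesame (openrdf) API assumed to be on the classpath, IRDFParserOptions is assumed to be in the same com.bigdata.rdf.rio package as RDFParserOptions, and the format and base URI are illustrative:

import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

import org.openrdf.model.Statement;
import org.openrdf.rio.RDFFormat;
import org.openrdf.rio.RDFParser;
import org.openrdf.rio.Rio;
import org.openrdf.rio.helpers.StatementCollector;

import com.bigdata.rdf.rio.IRDFParserOptions;
import com.bigdata.rdf.rio.RDFParserOptions;

public class ApplyOptionsExample {

    /**
     * Parse Turtle from the given stream with the caller's parser options
     * applied in a single call.
     */
    public static List<Statement> parseTurtle(final IRDFParserOptions opts,
            final InputStream in, final String baseURI) throws Exception {

        final RDFParser parser = Rio.createParser(RDFFormat.TURTLE);

        // Pushes verifyData, stopAtFirstError, datatypeHandling and
        // preserveBNodeIDs onto the parser.
        RDFParserOptions.apply(opts, parser);

        final List<Statement> statements = new ArrayList<Statement>();
        parser.setRDFHandler(new StatementCollector(statements));
        parser.parse(in, baseURI);

        return statements;

    }

}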
From: <tho...@us...> - 2013-11-26 14:17:59
Revision: 7595 http://bigdata.svn.sourceforge.net/bigdata/?rev=7595&view=rev Author: thompsonbry Date: 2013-11-26 14:17:48 +0000 (Tue, 26 Nov 2013) Log Message: ----------- @Override annotations Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/RDFParserOptions.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/RDFParserOptions.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/RDFParserOptions.java 2013-11-25 20:32:21 UTC (rev 7594) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/RDFParserOptions.java 2013-11-26 14:17:48 UTC (rev 7595) @@ -195,6 +195,7 @@ } + @Override public synchronized String toString() { return super.toString() + // "{verifyData=" + verifyData + // @@ -255,6 +256,7 @@ p.setVerifyData(verifyData); } + @Override public boolean equals(final Object o) { if (this == o) This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-25 20:32:27
Revision: 7594 http://bigdata.svn.sourceforge.net/bigdata/?rev=7594&view=rev Author: thompsonbry Date: 2013-11-25 20:32:21 +0000 (Mon, 25 Nov 2013) Log Message: ----------- reverting snapshot to true Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.properties Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.properties 2013-11-25 20:29:21 UTC (rev 7593) +++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2013-11-25 20:32:21 UTC (rev 7594) @@ -89,7 +89,7 @@ # Set true to do a snapshot build. This changes the value of ${version} to # include the date. -snapshot=false +snapshot=true # Javadoc build may be disabled using this property. The javadoc target will # not be executed unless this property is defined (its value does not matter). This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-25 20:29:28
Revision: 7593 http://bigdata.svn.sourceforge.net/bigdata/?rev=7593&view=rev Author: thompsonbry Date: 2013-11-25 20:29:21 +0000 (Mon, 25 Nov 2013) Log Message: ----------- incremental progress toward an rpm install Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.properties branches/BIGDATA_RELEASE_1_3_0/build.xml branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.properties 2013-11-25 17:43:33 UTC (rev 7592) +++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2013-11-25 20:29:21 UTC (rev 7593) @@ -89,7 +89,7 @@ # Set true to do a snapshot build. This changes the value of ${version} to # include the date. -snapshot=true +snapshot=false # Javadoc build may be disabled using this property. The javadoc target will # not be executed unless this property is defined (its value does not matter). @@ -98,6 +98,15 @@ # javadoc JVM is explicitly set in the javadoc target in the build.xml file. javadoc= +# packaging property set (rpm, deb). +package.release=1 +package.prefix=/usr +package.conf.dir=/etc/bigdata +package.fedname=BigdataFed +package.pid.dir=/var/run/bigdata +package.var.dir=/var/lib/bigdata +package.share.dir=/usr/share/bigdata + # The SCP program to use when uploading javadoc or releases. #ssh.scp=C:/Program Files/PuTTY/pscp ssh.scp=/usr/bin/scp Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-25 17:43:33 UTC (rev 7592) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-25 20:29:21 UTC (rev 7593) @@ -1254,7 +1254,27 @@ <mkdir dir="${rpm.dir}/SOURCES" /> <mkdir dir="${rpm.dir}/SPECS" /> <mkdir dir="${rpm.dir}/SRPMS" /> - <copy file="${bigdata.dir}/src/resources/rpm/bigdata.spec" todir="${rpm.dir}/SPECS"/> + <!-- Copy rpm spec file, replacing meta variables in the target. --> + <copy file="${bigdata.dir}/src/resources/rpm/bigdata.spec" todir="${rpm.dir}/SPECS"> + <filterchain> + <replacetokens> + <token key="build.ver" value="${build.ver}" /> + <token key="java.version" value="${javac.source}" /> + <token key="zookeeper.version" value="${zookeeper.version}" /> + <token key="package.release" value="${package.release}" /> + <token key="package.prefix" value="${package.prefix}" /><!--/usr--> + <token key="package.conf.dir" value="${package.conf.dir}" /><!--/etc/bigdata--> + <token key="package.pid.dir" value="${package.pid.dir}" /><!--/var/run/bigdata--> + <token key="package.var.dir" value="${package.var.dir}" /><!--/var/lib/bigdata--> + <token key="package.share.dir" value="${package.share.dir}" /> + <!-- + <token key="final.name" value="${final.name}" /> + <token key="package.build.dir" value="${package.build.dir}" /> + <token key="package.log.dir" value="${package.log.dir}" /> + <token key="c.lib" value="${c.lib}" /> --> + </replacetokens> + </filterchain> + </copy> <!-- build version of tarball that includes the version number in the filename. --> <copydir dest="${rpm.dir}/SOURCES/${version}" src="${stage.dir}/bigdata" /> <tar destfile="${rpm.dir}/SOURCES/${version}.tar.gz" @@ -1272,13 +1292,22 @@ <include name="${version}/bin/startHAServices" /> </tarfileset> </tar> -<!-- <copy file="${bigdata.dir}/REL.${version}.tgz" tofile="${rpm.dir}/SOURCES/${version}.tar.gz"/> --> + <!-- build rpm. 
--> <rpm specFile="bigdata.spec" topDir="ant-build/rpm" cleanBuildDir="true" - failOnError="false"/> - + failOnError="true"/> + <!-- copy to target location in build.dir. --> + <copy todir="${build.dir}/" flatten="true"> + <fileset dir="${rpm.dir}/RPMS"> + <include name="**/${version}*.rpm" /> + </fileset> + <fileset dir="${rpm.dir}/SRPMS"> + <include name="**/${version}*.rpm" /> + </fileset> + </copy> + <delete dir="${rpm.dir}" quiet="true" verbose="false"/> </target> <target name="ant-install-prepare" depends="jar, bundle" Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec 2013-11-25 17:43:33 UTC (rev 7592) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec 2013-11-25 20:29:21 UTC (rev 7593) @@ -1,7 +1,7 @@ Summary: bigdata highly available RDF/graph/SPARQL database Name: bigdata -Version: 1.2.3 -Release: 1 +Version: @build.ver@ +Release: @package.release@ License: GPLv2 Group: Applications/Databases URL: http://www.bigdata.com/blog @@ -9,11 +9,10 @@ BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root BuildArch: noarch -Requires: java >= 1.6 -Requires: zookeeper >= 3.3 +Requires: java >= @java.version@ +Requires: zookeeper >= @zookeeper.version@ %description - Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF capable of loading 1B triples in under one hour on a 15 node cluster. Bigdata operates in both a single machine mode (Journal), highly available replication cluster mode (HAJournalServer), and a horizontally sharded cluster mode (BigdataFederation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The HAJournalServer adds replication, online backup, horizontal scaling of query, and high availability. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation. Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the HAJournalServer for high availability and linear scaling in query throughput. Choose the Federation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput. @@ -30,9 +29,18 @@ %build # NOP: The RPM is generated from "binaries". +mkdir -p %{_builddir}/%name-%ve...@pa...efix@ %install rm -rf $RPM_BUILD_ROOT +# Rename file paths to reflect package prefix. 
+%{__mv} %{_builddir}/%name-%version/etc %{_builddir}/%name-%ve...@pa...efix@/etc +%{__mv} %{_builddir}/%name-%version/bin %{_builddir}/%name-%ve...@pa...efix@/bin +%{__mv} %{_builddir}/%name-%version/doc %{_builddir}/%name-%ve...@pa...efix@/doc +%{__mv} %{_builddir}/%name-%version/var %{_builddir}/%name-%ve...@pa...efix@/var +%{__mv} %{_builddir}/%name-%version/lib %{_builddir}/%name-%ve...@pa...efix@/lib +%{__mv} %{_builddir}/%name-%version/lib-dl %{_builddir}/%name-%ve...@pa...efix@/lib-dl +%{__mv} %{_builddir}/%name-%version/lib-ext %{_builddir}/%name-%ve...@pa...efix@/lib-ext # Copy all files from BUILD to BUILDROOT %{__cp} -Rip %{_builddir}/* $RPM_BUILD_ROOT @@ -40,15 +48,17 @@ rm -rf $RPM_BUILD_ROOT %files +# FIXME We need pre-/post-install and un-install scripts to symlink +# the init.d script and copy in /etc/bigdata/bigdataHA.config %defattr(-,root,root,-) -%doc /doc -%config /etc/bigdata -%config /var/config -/etc/init.d/bigdataHA -/bin -/lib-dl -/lib-ext -/lib +%config @package.prefix@/etc/bigdata +...@pa...efix@/etc/init.d/bigdataHA +%config @package.prefix@/var/config +%doc @package.prefix@/doc +...@pa...efix@/bin +...@pa...efix@/lib +...@pa...efix@/lib-dl +...@pa...efix@/lib-ext %changelog * Sun Nov 24 2013 EC2 Default User <ec2...@ip...ernal> - This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-25 17:43:39
Revision: 7592 http://bigdata.svn.sourceforge.net/bigdata/?rev=7592&view=rev Author: thompsonbry Date: 2013-11-25 17:43:33 +0000 (Mon, 25 Nov 2013) Log Message: ----------- continued work on a bigdata rpm artifact. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec 2013-11-25 16:45:46 UTC (rev 7591) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec 2013-11-25 17:43:33 UTC (rev 7592) @@ -9,7 +9,8 @@ BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root BuildArch: noarch -Requires: java +Requires: java >= 1.6 +Requires: zookeeper >= 3.3 %description @@ -18,7 +19,7 @@ Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the HAJournalServer for high availability and linear scaling in query throughput. Choose the Federation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput. %package javadoc -Summary: API documentation for %{name}-%{version} +Summary: API documentation for %{name}-%{version} BuildArch: noarch %description javadoc @@ -29,14 +30,10 @@ %build # NOP: The RPM is generated from "binaries". -#echo "BUILD: pwd=`pwd`" -#ls -l %install rm -rf $RPM_BUILD_ROOT -#echo "RPM_BUILD_ROOT=$RPM_BUILD_ROOT" -#echo "INSTALL: pwd=`pwd`" -# copy files from BUILD to BUILDROOT +# Copy all files from BUILD to BUILDROOT %{__cp} -Rip %{_builddir}/* $RPM_BUILD_ROOT %clean @@ -45,18 +42,10 @@ %files %defattr(-,root,root,-) %doc /doc -#%config /etc/bigdata +%config /etc/bigdata %config /var/config -/bin/bigdataHA -/bin/bigdataHAEnv -/bin/config/browser.config -/bin/config/disco-logging.properties -/bin/config/disco.config -/bin/config/reggie.config -/bin/config/serviceStarter.config -/bin/disco-tool -/bin/pstart -/bin/startHAServices +/etc/init.d/bigdataHA +/bin /lib-dl /lib-ext /lib This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-25 16:45:55
Revision: 7591 http://bigdata.svn.sourceforge.net/bigdata/?rev=7591&view=rev Author: thompsonbry Date: 2013-11-25 16:45:46 +0000 (Mon, 25 Nov 2013) Log Message: ----------- Continued work toward an rpm artifact. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README.dev branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-25 16:43:44 UTC (rev 7590) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-25 16:45:46 UTC (rev 7591) @@ -880,6 +880,9 @@ <mkdir dir="${dist.var.config.jini}" /> <mkdir dir="${dist.doc}" /> <mkdir dir="${dist.doc.legal}" /> + <mkdir dir="${dist.dir}/etc" /> + <mkdir dir="${dist.dir}/etc/init.d" /> + <mkdir dir="${dist.dir}/etc/bigdata" /> <!-- Copy the jar files created by the jar target to --> <!-- an application-specific but non-version-specific --> @@ -1051,12 +1054,12 @@ todir="${dist.bin}" /> <chmod file="${dist.bin}/startHAServices" perm="755" /> - <copy file="${src.resources}/bin/bigdataHA" - todir="${dist.bin}" /> - <chmod file="${dist.bin}/bigdataHA" perm="755" /> + <copy file="${src.resources}/etc/init.d/bigdataHA" + todir="${dist.dir}/etc/init.d" /> + <chmod file="${dist.dir}/etc/init.d/bigdataHA" perm="755" /> - <copy file="${src.resources}/bin/bigdataHAEnv" - todir="${dist.bin}" /> + <copy file="${src.resources}/etc/bigdata/bigdataHA.config" + todir="${dist.dir}/etc/bigdata" /> <copy file="${src.resources}/bin/config/browser.config" todir="${dist.bin.config}" /> @@ -1150,6 +1153,18 @@ <include name="**/LEGAL/*" /> </fileset> </copy> + + <!-- Stage README. --> + <copy file="${src.resources}/HAJournal/README" + todir="${dist.dir}/doc" /> + + <!-- Stage documentation from the wiki. --> + <get dest="${dist.doc}/HAJournalServer.html" + src="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=HAJournalServer&printable=yes" + /> + <get dest="${dist.doc}/NanoSparqlServer.html" + src="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=NanoSparqlServer&printable=yes" + /> </target> @@ -1193,6 +1208,14 @@ bigdata/doc/LEGAL - license files for dependencies. bigdata/doc/LICENSE.txt - bigdata license file. bigdata/doc/NOTICE - copyright NOTICE files. + bigdata/doc/docs - javadoc (FIXME INSTALL JAVADOC, HA wiki page) + bigdata/etc/init.d/bigdataHA - HA services start/stop script. + bigdata/etc/bigdata/bigdataHA.config - HA services required config file. + + Note: This directory structure is currently reused for the rpm, but the + top-level of the rpm directory structure includes the release version as + bigdata.X.Y.Z rather than just "bigdata". I think that this is a better + practice and the two may be converged. 
--> <target name="deploy-artifact" depends="clean, stage" description="Create compressed tar file for deployment."> Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README 2013-11-25 16:43:44 UTC (rev 7590) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README 2013-11-25 16:45:46 UTC (rev 7591) @@ -1,82 +1,141 @@ -This directory contains some sample configuration for a highly available -Journal. +Bigdata Highly Available Replication Cluster -Note: The bigdata scripts bundled in this directory are designed to be run -from the root directory of the SVN checkout of the bigdata code base. This -is used for developers. The installation is done using the top-level ant -build file and the "ant deploy-artifact" target. +========== INSTALL ========== -The basic procedure is: +0. The nodes MUST have synchronized clocks, both for logging and to + ensure that the transaction services have closely correlated clocks + for assigning timestamps. Make sure that ntp or a similar service + is running to synchronize the clocks. -0. The nodes MUST have synchronized clocks, both for logging and to ensure - that the transaction services have closely correlated clocks for assigning - timestamps. Make sure that ntp or a similar service is running to synchronize - the clocks. +1. Edit the various configuration files. At a minimum, you must edit + bigdataHA.conf. -1. Edit the various configuration files. You will have to specify the - replicationFactor for the HAJournal in the HAJournal.config file. Make - sure to check all the configuration properties. +2. Make sure that ZooKeeper is up and running with a consistent + configuration and that it is logging somewhere where you can find + the log later. For a highly available ZooKeeper configuration, you + need to be running at least 3 ZooKeeper nodes. Consult the + ZooKeeper documentation for more information. -2. Make sure that zookeeper is up and running with a consistent configuration - and that it is logging somewhere where you can find the log later. A good - approach is to use nohup so the console output will wind up in the directory - from which you start zookeeper. Do not put zookeeper in the background or - it can block once the console buffer is full. For a highly available zk - configuration, you need to be running at least 3 zk nodes. Consult the zk - documentation for more information. + Bigdata does NOT start/stop Apache ZooKeeper. ZooKeeper is + generally administered separately. If you are not already using + Apache ZooKeeper, then you should install three VMs with Apache + ZooKeeper onto machines with fixed IP addresses. -3. Start the ClassServer on each machine. This will let the service registrar - find the downloadable jars on that machine. + Note: If you begin with a standalone ZooKeeper instance, then you + WILL NOT be able to automatically migrate to a highly available + configuration without stopping your standalone ZooKeeper instance. + Your life will be significantly easier if you begin with a highly + available ZooKeeper instance. Bigdata does not put a heavy load on + ZooKeeper, but running bigdata and ZooKeeper on the same instances + will make it more complex to administer your environment since + stopping a single node will reduce availability for both ZooKeeper + and bigdata. 
A recommended practice is to allocate three ZooKeeper + VMs with fixed IP addresses when you begin to setup your bigdata + cluster. -4. Start the service registrar on at least one machine (as configured by - the locators). A highly available jini/river service will run multiple - service registrar and provide either multiple unicast locators or support - multicast discovery of the service registrar. Consult the jini/river - documentation for more information. - -5. Start the HAJournalServer on [k] machines, where [k] is the replication - factor you specified in the HAJournal.config file. The quorum should - meet once (k+1)/2 services join (majority rule). At this point one of - the nodes will be elected as the leader. You can write on that node - (e.g., using SPARQL UPDATE). You can read on any node that is joined - with the met quorum. - - Note: The default log4j configuration writes onto a file named - "HAJournalServer.log" -- that is where you need to look for errors - and any other information about the running HAJournalServer process. +3. Once Apache ZooKeeper is setup, do: -A brief description of the files in this directory follows: + sudo /etc/init.d bigdataHA start -HAJournal.env - A shell script containing sample configuration values. This - is sourced by the various scripts. You need to review all - of these settings. + This will start the ClassServer, the service registrar (Reggie), + and the HAJournalServer. All of these processes will run inside of + a single JVM named "ServiceStarter". See below for more information + on these services. -HAJournal.config - A sample configuration file for the HAJournalServer. You - need to review the settings in this file as well. +========== KEY FILES ========== -classServer.sh - A shell script that will start the jini class server (for - downloadable code). - -lookupStarter.sh - A shell script that will start the jini service registrar. +/etc/init.d/bigdataHA -HAJournalServer.sh - A shell script that will start the HAJournalServer. + An init.d script to start/stop of bigdata HA. - The server process will create a directory in which it - logs the replicated writes in case other services need to - resynchronize. This directory is named "HALog" by default - and may be located on a normal disk. The ha-log files in - that directory are pure append files and do not need to be - on a fast disk. The ha-log files will be purged at any - commit point when the quorum is fully met. These HALog files - can get large if you are doing a long running update. +/etc/bigdata/bigdataHA.conf - configuration for the HA installation. -log4jHA.properties - A default log4j configuration file for use by the bigdata - services. - -logging.properties - A default Java logging configuration. This may be used - to control the log levels for jini/river components inside - of the bigdata services. Those components use java logging - rather than log4j. + This script is sourced by /etc/init.d/bigdataHA and provides the + critical configuration variables for your installation. The + environment variables set in this script are passed through into + startHAServices and from there into the HAJournal.config file. You + need to review these settings. -policy.all - A default java permissions file. This file grants ALL permissions. - You may specify a more rigorous security policy. +The following are located under the installation root: + +bin/startHAServices + + Script runs the Apache River ServiceStarter. 
+ +bin/disco-tool + + A utility that can be used to identify running Apache River + services. + +doc/ + + Documentation. + +lib/ + + The bigdata jar and other dependencies. + +lib-dl/ + + Downloadable jars for Apache River. + +lib-ext/ + + Security policy provider for Apache River. + +var/config/startHAServices.config + + An Apache River ServiceStarter configuration for starting: + + - ClassServer : This provides downloadable code for the lib-dl + directory required to run Reggie. An instance of this service + is started on every node. + + - Reggie : This is the Apache River service registrar. Bigdata + services discover service registrars using locators and then + register themselves. The service registrar is also used by the + bigdata services to discover one another. The set of locators is + defined using the LOCATORS environment variable in + /etc/bigdata/bigdataHA.config; and + + - HAJournalServer : This is the highly available bigdata graph + database engine and RDF/SPARQL end point. The service process + maintains all of its state in the "serviceDir". The location of + that directory is determined by the FED_DIR environment variable + and the HAJournal.config file. Important files in this + directory include: + + serviceDir/service.id - the assigned ServiceID for this service. + serviceDir/bigdata-ha.jnl - the journal data file. + serviceDir/HALog/* - the transaction log files. + serviceDir/snapshot - full backups of the journal. + +var/config/HAJournal.config + + An Apache River configuration file for HAJournalServer. You should + review the settings in this file. The most relevant will be the + configuration parameters for the default kb instance, especially + whether it is in triples mode or quads mode. The configuration + parameters for the journal are also specified in this file. Many, + but not all, parameters can be overridden through environment + variables defined in /etc/bigdata/bigdataHA.config. This file is + also used to configure the online backup policy (snapshotPolicy) and + point in time restore window (restorePolicy). + +var/logging/log4jHA.properties + + The HAJournalServer log4j configuration file. Note: The default + log4j configuration writes onto a file named "HAJournalServer.log" + -- that is where you need to look for errors and any other + information about the running HAJournalServer process. + +var/logging/logging.properties + + A default Java logging configuration. This may be used to control + the log levels for jini/river components inside of the bigdata + services. Those components use java logging rather than log4j. + +var/policy/policy.all + + A default java permissions file. This file grants ALL permissions. + You may specify a more rigorous security policy. Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README.dev =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README.dev (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README.dev 2013-11-25 16:45:46 UTC (rev 7591) @@ -0,0 +1,82 @@ +This directory contains some sample configuration for a highly available +Journal. + +Note: The bigdata scripts bundled in this directory are designed to be run +from the root directory of the SVN checkout of the bigdata code base. This +is used for developers. The installation is done using the top-level ant +build file and the "ant deploy-artifact" target. + +The basic procedure is: + +0. 
The nodes MUST have synchronized clocks, both for logging and to ensure + that the transaction services have closely correlated clocks for assigning + timestamps. Make sure that ntp or a similar service is running to synchronize + the clocks. + +1. Edit the various configuration files. You will have to specify the + replicationFactor for the HAJournal in the HAJournal.config file. Make + sure to check all the configuration properties. + +2. Make sure that zookeeper is up and running with a consistent configuration + and that it is logging somewhere where you can find the log later. A good + approach is to use nohup so the console output will wind up in the directory + from which you start zookeeper. Do not put zookeeper in the background or + it can block once the console buffer is full. For a highly available zk + configuration, you need to be running at least 3 zk nodes. Consult the zk + documentation for more information. + +3. Start the ClassServer on each machine. This will let the service registrar + find the downloadable jars on that machine. + +4. Start the service registrar on at least one machine (as configured by + the locators). A highly available jini/river service will run multiple + service registrar and provide either multiple unicast locators or support + multicast discovery of the service registrar. Consult the jini/river + documentation for more information. + +5. Start the HAJournalServer on [k] machines, where [k] is the replication + factor you specified in the HAJournal.config file. The quorum should + meet once (k+1)/2 services join (majority rule). At this point one of + the nodes will be elected as the leader. You can write on that node + (e.g., using SPARQL UPDATE). You can read on any node that is joined + with the met quorum. + + Note: The default log4j configuration writes onto a file named + "HAJournalServer.log" -- that is where you need to look for errors + and any other information about the running HAJournalServer process. + +A brief description of the files in this directory follows: + +HAJournal.env - A shell script containing sample configuration values. This + is sourced by the various scripts. You need to review all + of these settings. + +HAJournal.config - A sample configuration file for the HAJournalServer. You + need to review the settings in this file as well. + +classServer.sh - A shell script that will start the jini class server (for + downloadable code). + +lookupStarter.sh - A shell script that will start the jini service registrar. + +HAJournalServer.sh - A shell script that will start the HAJournalServer. + + The server process will create a directory in which it + logs the replicated writes in case other services need to + resynchronize. This directory is named "HALog" by default + and may be located on a normal disk. The ha-log files in + that directory are pure append files and do not need to be + on a fast disk. The ha-log files will be purged at any + commit point when the quorum is fully met. These HALog files + can get large if you are doing a long running update. + +log4jHA.properties - A default log4j configuration file for use by the bigdata + services. + +logging.properties - A default Java logging configuration. This may be used + to control the log levels for jini/river components inside + of the bigdata services. Those components use java logging + rather than log4j. + +policy.all - A default java permissions file. This file grants ALL permissions. + You may specify a more rigorous security policy. 
Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA 2013-11-25 16:43:44 UTC (rev 7590) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA 2013-11-25 16:45:46 UTC (rev 7591) @@ -1,131 +0,0 @@ -#!/bin/bash - -# init.d style script for bigdata HA services. The script can be used -# to 'start' or 'stop' services. -# -# Environment: -# -# binDir - The directory containing the installed scripts. -# pidFile - The pid is written on this file. -# -# Misc. -# -# See http://tldp.org/LDP/abs/html/index.html -# -# Note: Blank lines are significant in shell scripts. -# -# Note: Children must do "exit 0" to indicate success. -# -# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix - -# Source function library (just used for 'action'). If you don't have this -# it SHOULD automatically use the inline definition for "action()". -if [ -f "/etc/init.d/functions" ]; then - . /etc/init.d/functions -else -# Run some action. Log its output. No fancy colors. First argument is the -# label for the log file. Remaining arguments are the command to execute -# and its arguments, if any. - action() { - local STRING rc - STRING=$1 - echo -n "$STRING " - shift - "$@" && echo -n "[OK]" || echo -n "[FAILED]" - rc=$? - echo - return $rc - } -fi - -# Where the scripts live. -cd `dirname $0` - -## -# Highly Recommended OS Tuning. -## - -# Do not swap out applications while there is free memory. -#/sbin/sysctl -w vm.swappiness=0 - -# Setup the environment. -source ./bigdataHAEnv - -if [ -z "$binDir" ]; then - echo $"$0 : environment not setup: binDir is undefined." - exit 1; -fi -if [ -z "$pidFile" ]; then - echo $"$0 : environment not setup: pidFile is undefined" - exit 1; -fi - -# -# See how we were called. -# -case "$1" in - start) -# -# Start the ServiceStarter and child services if not running. -# - if [ -f "$pidFile" ]; then - read pid < "$pidFile" - pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) - if [ -z "$pidno" ]; then -# The process has died so remove the old pid file. - echo $"`date` : `hostname` : $pid died?" - rm -f "$pidFile" - fi - fi - if [ ! -f "$pidFile" ]; then - action $"`date` : `hostname` : bringing up services: " $binDir/startHAServices - else - echo $"`date` : `hostname` : running as $pid" - fi - ;; - stop) -# -# Stop the ServiceStarter and all child services. -# - if [ -f "$pidFile" ]; then - read pid < "$pidFile" - pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) - if [ -z "$pidno" ]; then -# The process has died so remove the old pid file. - echo $"`date` : `hostname` : $pid died?" - rm -f "$pidFile" - else - action $"`date` : `hostname` : bringing down services: " kill $pid - rm -f "$pidFile" - fi - fi - ;; - status) -# -# Report status for the ServicesManager (up or down). -# - if [ -f "$pidFile" ]; then - read pid < "$pidFile" - pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) - if [ -z "$pidno" ]; then - echo $"`date` : `hostname` : process died? pid=$pid." - else - echo $"`date` : `hostname` : running as $pid." - fi - else - echo $"`date` : `hostname` : not running." 
- fi - ;; - restart) - $0 stop - $0 start - ;; - *) -# -# Usage -# - echo $"Usage: $0 {start|stop|status|restart}" - exit 1 -esac - -exit 0 Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv 2013-11-25 16:43:44 UTC (rev 7590) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv 2013-11-25 16:45:46 UTC (rev 7591) @@ -1,11 +0,0 @@ -# Environment for bigdata HA services. -# -# binDir - The directory containing the installed scripts. -# pidFile - The pid is written on this file. -# -# Note: You MUST provide the location of the executable scripts and the -# pid file that is written by $binDir/startHAServices. These SHOULD be -# absolute path names. - -#binDir= -#pidFile= Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-25 16:43:44 UTC (rev 7590) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-25 16:45:46 UTC (rev 7591) @@ -65,17 +65,6 @@ export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081"; fi -# All of these have defaults. -#export REPLICATION_FACTOR=3 -#export HA_PORT=9090 -#export NSS_PORT=8080 -#export QUERY_THREAD_POOL_SIZE= -#export COLLECT_QUEUE_STATISTICS= -#export COLLECT_PLATFORM_STATISTICS= -#export GANGLIA_REPORT= -#export GANGLIA_LISTENER= -#export SYSSTAT_DIR= - export HA_OPTS="\ -DFEDNAME=${FEDNAME}\ -DLOGICAL_SERVICE_ID=${LOGICAL_SERVICE_ID}\ Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config 2013-11-25 16:45:46 UTC (rev 7591) @@ -0,0 +1,45 @@ +# Environment for bigdata HA services. +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Note: You MUST provide the location of the executable scripts and the +# pid file that is written by $binDir/startHAServices. These SHOULD be +# absolute path names. + +#binDir= +#pidFile= + +## +# The following variables configure the startHAServices script, which +# passes them through to HAJournal.config. +## + +# Name of the bigdata gederation of services. Override for real install. +export FEDNAME=bigdataInstallTest + +# This is different for each HA replication cluster in the same federation +# of services. If you have multiple such replication cluster, then just +# given each such cluster its own name. +export LOGICAL_SERVICE_ID=HAJournalServer-1 + +# Local directory where the service will store its state. +export FED_DIR=/var/bigdata/${FEDNAME} + +# Apache River - NO default for "LOCATORS". +export GROUPS="$FEDNAME" +#export LOCATORS="jini://bigdata15/,jini://bigdata16/,jini://bigdata17/" + +# Apache ZooKeeper - NO default. +#export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081"; + +# All of these have defaults. Override as necessary. 
+#export REPLICATION_FACTOR=3 +#export HA_PORT=9090 +#export NSS_PORT=8080 +#export QUERY_THREAD_POOL_SIZE= +#export COLLECT_QUEUE_STATISTICS= +#export COLLECT_PLATFORM_STATISTICS= +#export GANGLIA_REPORT= +#export GANGLIA_LISTENER= +#export SYSSTAT_DIR= Copied: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA (from rev 7589, branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA) =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2013-11-25 16:45:46 UTC (rev 7591) @@ -0,0 +1,131 @@ +#!/bin/bash + +# init.d style script for bigdata HA services. The script can be used +# to 'start' or 'stop' services. +# +# Environment: +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Misc. +# +# See http://tldp.org/LDP/abs/html/index.html +# +# Note: Blank lines are significant in shell scripts. +# +# Note: Children must do "exit 0" to indicate success. +# +# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix + +# Source function library (just used for 'action'). If you don't have this +# it SHOULD automatically use the inline definition for "action()". +if [ -f "/etc/init.d/functions" ]; then + . /etc/init.d/functions +else +# Run some action. Log its output. No fancy colors. First argument is the +# label for the log file. Remaining arguments are the command to execute +# and its arguments, if any. + action() { + local STRING rc + STRING=$1 + echo -n "$STRING " + shift + "$@" && echo -n "[OK]" || echo -n "[FAILED]" + rc=$? + echo + return $rc + } +fi + +# Where the scripts live. +cd `dirname $0` + +## +# Highly Recommended OS Tuning. +## + +# Do not swap out applications while there is free memory. +#/sbin/sysctl -w vm.swappiness=0 + +# Setup the environment. +source bigdata/bigdataHA.config + +if [ -z "$binDir" ]; then + echo $"$0 : environment not setup: binDir is undefined." + exit 1; +fi +if [ -z "$pidFile" ]; then + echo $"$0 : environment not setup: pidFile is undefined" + exit 1; +fi + +# +# See how we were called. +# +case "$1" in + start) +# +# Start the ServiceStarter and child services if not running. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + fi + fi + if [ ! -f "$pidFile" ]; then + action $"`date` : `hostname` : bringing up services: " $binDir/startHAServices + else + echo $"`date` : `hostname` : running as $pid" + fi + ;; + stop) +# +# Stop the ServiceStarter and all child services. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + else + action $"`date` : `hostname` : bringing down services: " kill $pid + rm -f "$pidFile" + fi + fi + ;; + status) +# +# Report status for the ServicesManager (up or down). +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then + echo $"`date` : `hostname` : process died? pid=$pid." + else + echo $"`date` : `hostname` : running as $pid." 
+ fi + else + echo $"`date` : `hostname` : not running." + fi + ;; + restart) + $0 stop + $0 start + ;; + *) +# +# Usage +# + echo $"Usage: $0 {start|stop|status|restart}" + exit 1 +esac + +exit 0 This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
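To make the new /etc layout concrete, a hypothetical first start on a single node could look like the sketch below. This is only an illustration: the /opt/bigdata install prefix, the pid file location, and the example host names are assumptions, not values from the commit; binDir and pidFile must point at wherever the deploy artifact was actually unpacked, and LOCATORS / ZK_SERVERS have no defaults and must be supplied.

   # /etc/bigdata/bigdataHA.config -- required settings (absolute paths)
   binDir=/opt/bigdata/bin                 # assumed install prefix
   pidFile=/var/run/bigdataHA.pid          # written by startHAServices
   export FEDNAME=myFederation
   export LOGICAL_SERVICE_ID=HAJournalServer-1
   export FED_DIR=/var/bigdata/${FEDNAME}
   export LOCATORS="jini://node1/,jini://node2/,jini://node3/"   # placeholder hosts
   export ZK_SERVERS="node1:2181,node2:2181,node3:2181"          # placeholder hosts and standard ZooKeeper port

   # then, as root, on each node:
   /etc/init.d/bigdataHA start     # brings up ClassServer, Reggie and HAJournalServer in one ServiceStarter JVM
   /etc/init.d/bigdataHA status    # reports the ServiceStarter pid, if running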
From: <tho...@us...> - 2013-11-25 16:43:51

Revision: 7590 http://bigdata.svn.sourceforge.net/bigdata/?rev=7590&view=rev Author: thompsonbry Date: 2013-11-25 16:43:44 +0000 (Mon, 25 Nov 2013) Log Message: ----------- revised spec. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec 2013-11-24 21:56:05 UTC (rev 7589) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec 2013-11-25 16:43:44 UTC (rev 7590) @@ -1,4 +1,4 @@ -Summary: bigdata RDF/graph database +Summary: bigdata highly available RDF/graph/SPARQL database Name: bigdata Version: 1.2.3 Release: 1 @@ -19,6 +19,7 @@ %package javadoc Summary: API documentation for %{name}-%{version} +BuildArch: noarch %description javadoc API documentation for %{name}-%{version} @@ -27,20 +28,39 @@ %setup -q %build +# NOP: The RPM is generated from "binaries". +#echo "BUILD: pwd=`pwd`" +#ls -l %install rm -rf $RPM_BUILD_ROOT +#echo "RPM_BUILD_ROOT=$RPM_BUILD_ROOT" +#echo "INSTALL: pwd=`pwd`" +# copy files from BUILD to BUILDROOT +%{__cp} -Rip %{_builddir}/* $RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT - %files %defattr(-,root,root,-) -%doc +%doc /doc +#%config /etc/bigdata +%config /var/config +/bin/bigdataHA +/bin/bigdataHAEnv +/bin/config/browser.config +/bin/config/disco-logging.properties +/bin/config/disco.config +/bin/config/reggie.config +/bin/config/serviceStarter.config +/bin/disco-tool +/bin/pstart +/bin/startHAServices +/lib-dl +/lib-ext +/lib - %changelog * Sun Nov 24 2013 EC2 Default User <ec2...@ip...ernal> - -- Initial build. - +- Initial packaging as rpm. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
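One way to sanity-check the revised %files section is to query the generated package before installing it. The package path below assumes the ant "rpm" target's topDir of ant-build/rpm and the bigdata-1.2.3-1 noarch naming from the spec preamble; adjust it to whatever rpmbuild actually produced on your machine.

   RPM=ant-build/rpm/RPMS/noarch/bigdata-1.2.3-1.noarch.rpm   # assumed output path
   rpm -qpi "$RPM"       # name, version, license and description from the preamble
   rpm -qpl "$RPM"       # payload listing -- should line up with the %files section
   sudo rpm -ivh "$RPM"  # install once the payload looks right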
From: <tho...@us...> - 2013-11-24 21:56:11
Revision: 7589 http://bigdata.svn.sourceforge.net/bigdata/?rev=7589&view=rev Author: thompsonbry Date: 2013-11-24 21:56:05 +0000 (Sun, 24 Nov 2013) Log Message: ----------- initial draft of an rpm install. this generates an rpm, but does not actually put anything into place when you do rpm -i yet. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/ branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-24 14:38:01 UTC (rev 7588) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-24 21:56:05 UTC (rev 7589) @@ -1218,6 +1218,46 @@ </target> + <!-- FIXME DEBUG and add 'depends="javadoc, stage" (should stage stage javadoc?)' --> + <!-- Note: can require 'rpm' and 'rpm-build. --> + <!-- TODO: We do not need both this and "deploy-artifact". --> + <target name="rpm" depends="prepare" description="Build RPM installer."> + <property name="rpm.dir" value="${build.dir}/rpm" /> + <property name="stage.dir" value="dist" /> + <mkdir dir="${rpm.dir}" /> + <mkdir dir="${rpm.dir}/BUILD" /> + <mkdir dir="${rpm.dir}/BUILDROOT" /> + <mkdir dir="${rpm.dir}/RPMS" /> + <mkdir dir="${rpm.dir}/SOURCES" /> + <mkdir dir="${rpm.dir}/SPECS" /> + <mkdir dir="${rpm.dir}/SRPMS" /> + <copy file="${bigdata.dir}/src/resources/rpm/bigdata.spec" todir="${rpm.dir}/SPECS"/> + <!-- build version of tarball that includes the version number in the filename. --> + <copydir dest="${rpm.dir}/SOURCES/${version}" src="${stage.dir}/bigdata" /> + <tar destfile="${rpm.dir}/SOURCES/${version}.tar.gz" + compression="gzip"> + <tarfileset dir="${rpm.dir}/SOURCES"> + <include name="${version}/**" /> + <exclude name="${version}/bin/disco-tool" /> + <exclude name="${version}/bin/pstart" /> + <exclude name="${version}/bin/startHAServices" /> + </tarfileset> + <!-- Add scripts separately, making them executable --> + <tarfileset dir="${rpm.dir}/SOURCES" filemode="755"> + <include name="${version}/bin/disco-tool" /> + <include name="${version}/bin/pstart" /> + <include name="${version}/bin/startHAServices" /> + </tarfileset> + </tar> +<!-- <copy file="${bigdata.dir}/REL.${version}.tgz" tofile="${rpm.dir}/SOURCES/${version}.tar.gz"/> --> + <rpm + specFile="bigdata.spec" + topDir="ant-build/rpm" + cleanBuildDir="true" + failOnError="false"/> + + </target> + <target name="ant-install-prepare" depends="jar, bundle" description="Stage all files (src, lib, config, etc.) needed for ant based install."> Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/rpm/bigdata.spec 2013-11-24 21:56:05 UTC (rev 7589) @@ -0,0 +1,46 @@ +Summary: bigdata RDF/graph database +Name: bigdata +Version: 1.2.3 +Release: 1 +License: GPLv2 +Group: Applications/Databases +URL: http://www.bigdata.com/blog +Source0: %{name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root +BuildArch: noarch + +Requires: java + +%description + +Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF capable of loading 1B triples in under one hour on a 15 node cluster. 
Bigdata operates in both a single machine mode (Journal), highly available replication cluster mode (HAJournalServer), and a horizontally sharded cluster mode (BigdataFederation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The HAJournalServer adds replication, online backup, horizontal scaling of query, and high availability. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation. + +Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the HAJournalServer for high availability and linear scaling in query throughput. Choose the Federation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput. + +%package javadoc +Summary: API documentation for %{name}-%{version} + +%description javadoc +API documentation for %{name}-%{version} + +%prep +%setup -q + +%build + +%install +rm -rf $RPM_BUILD_ROOT + +%clean +rm -rf $RPM_BUILD_ROOT + + +%files +%defattr(-,root,root,-) +%doc + + +%changelog +* Sun Nov 24 2013 EC2 Default User <ec2...@ip...ernal> - +- Initial build. + This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-24 14:38:10
Revision: 7588 http://bigdata.svn.sourceforge.net/bigdata/?rev=7588&view=rev Author: thompsonbry Date: 2013-11-24 14:38:01 +0000 (Sun, 24 Nov 2013) Log Message: ----------- moved river and zk defaults to after bigdata defaults. added draft of 1.3.0 release notes. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_0.txt Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_0.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_0.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_0.txt 2013-11-24 14:38:01 UTC (rev 7588) @@ -0,0 +1,299 @@ +This is a minor release of bigdata(R). + +Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF capable of loading 1B triples in under one hour on a 15 node cluster. Bigdata operates in both a single machine mode (Journal), highly available replication cluster mode (HAJournalServer), and a horizontally sharded cluster mode (BigdataFederation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The HAJournalServer adds replication, online backup, horizontal scaling of query, and high availability. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation. + +Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the HAJournalServer for high availability and linear scaling in query throughput. Choose the BigdataFederation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput. + +See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and [3,5,6] for news, questions, and the latest developments. For more information about SYSTAP, LLC and bigdata, see [7]. + +Starting with the 1.0.0 release, we offer a WAR artifact [8] for easy installation of the single machine RDF database. For custom development and cluster installations we recommend checking out the code from SVN using the tag for this release. The code will build automatically under eclipse. You can also build the code using the ant script. The cluster installer requires the use of the ant script. + +You can download the WAR from: + +http://sourceforge.net/projects/bigdata/ + +You can checkout this release from: + +https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_3_0 + +New features: + +- SPARQL 1.1 Update Extensions (SPARQL UPDATE for named solution sets). See https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=SPARQL_Update for more information. +- SPARQL 1.1 Property Paths. +- Remote Java client for Multi-Tenancy extensions NanoSparqlServer +- Sesame 2.6.10 dependency +- Plus numerous other bug fixes and performance enhancements. 
+ +Feature summary: + +- Single machine data storage to ~50B triples/quads (RWStore); +- Clustered data storage is essentially unlimited; +- Simple embedded and/or webapp deployment (NanoSparqlServer); +- Triples, quads, or triples with provenance (SIDs); +- Fast RDFS+ inference and truth maintenance; +- Fast 100% native SPARQL 1.1 evaluation; +- Integrated "analytic" query package; +- %100 Java memory manager leverages the JVM native heap (no GC); + +Road map [3]: + +- High availability for the journal and the cluster. +- Runtime Query Optimizer for Analytic Query mode; and +- Simplified deployment, configuration, and administration for clusters. + +Change log: + + Note: Versions with (*) MAY require data migration. For details, see [9]. + +1.2.4: + +- http://sourceforge.net/apps/trac/bigdata/ticket/777 (ConcurrentModificationException in ASTComplexOptionalOptimizer) + +1.2.3: + +- http://sourceforge.net/apps/trac/bigdata/ticket/168 (Maven Build) +- http://sourceforge.net/apps/trac/bigdata/ticket/196 (Journal leaks memory). +- http://sourceforge.net/apps/trac/bigdata/ticket/235 (Occasional deadlock in CI runs in com.bigdata.io.writecache.TestAll) +- http://sourceforge.net/apps/trac/bigdata/ticket/312 (CI (mock) quorums deadlock) +- http://sourceforge.net/apps/trac/bigdata/ticket/405 (Optimize hash join for subgroups with no incoming bound vars.) +- http://sourceforge.net/apps/trac/bigdata/ticket/412 (StaticAnalysis#getDefinitelyBound() ignores exogenous variables.) +- http://sourceforge.net/apps/trac/bigdata/ticket/485 (RDFS Plus Profile) +- http://sourceforge.net/apps/trac/bigdata/ticket/495 (SPARQL 1.1 Property Paths) +- http://sourceforge.net/apps/trac/bigdata/ticket/519 (Negative parser tests) +- http://sourceforge.net/apps/trac/bigdata/ticket/531 (SPARQL UPDATE for SOLUTION SETS) +- http://sourceforge.net/apps/trac/bigdata/ticket/535 (Optimize JOIN VARS for Sub-Selects) +- http://sourceforge.net/apps/trac/bigdata/ticket/555 (Support PSOutputStream/InputStream at IRawStore) +- http://sourceforge.net/apps/trac/bigdata/ticket/559 (Use RDFFormat.NQUADS as the format identifier for the NQuads parser) +- http://sourceforge.net/apps/trac/bigdata/ticket/570 (MemoryManager Journal does not implement all methods). +- http://sourceforge.net/apps/trac/bigdata/ticket/575 (NSS Admin API) +- http://sourceforge.net/apps/trac/bigdata/ticket/577 (DESCRIBE with OFFSET/LIMIT needs to use sub-select) +- http://sourceforge.net/apps/trac/bigdata/ticket/578 (Concise Bounded Description (CBD)) +- http://sourceforge.net/apps/trac/bigdata/ticket/579 (CONSTRUCT should use distinct SPO filter) +- http://sourceforge.net/apps/trac/bigdata/ticket/583 (VoID in ServiceDescription) +- http://sourceforge.net/apps/trac/bigdata/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/590 (nxparser fails with uppercase language tag) +- http://sourceforge.net/apps/trac/bigdata/ticket/592 (Optimize RWStore allocator sizes) +- http://sourceforge.net/apps/trac/bigdata/ticket/593 (Ugrade to Sesame 2.6.10) +- http://sourceforge.net/apps/trac/bigdata/ticket/594 (WAR was deployed using TRIPLES rather than QUADS by default) +- http://sourceforge.net/apps/trac/bigdata/ticket/596 (Change web.xml parameter names to be consistent with Jini/River) +- http://sourceforge.net/apps/trac/bigdata/ticket/597 (SPARQL UPDATE LISTENER) +- http://sourceforge.net/apps/trac/bigdata/ticket/598 (B+Tree branching factor and HTree addressBits are confused in their NodeSerializer implementations) +- http://sourceforge.net/apps/trac/bigdata/ticket/599 (BlobIV for blank node : NotMaterializedException) +- http://sourceforge.net/apps/trac/bigdata/ticket/600 (BlobIV collision counter hits false limit.) +- http://sourceforge.net/apps/trac/bigdata/ticket/601 (Log uncaught exceptions) +- http://sourceforge.net/apps/trac/bigdata/ticket/602 (RWStore does not discard logged deletes on reset()) +- http://sourceforge.net/apps/trac/bigdata/ticket/607 (History service / index) +- http://sourceforge.net/apps/trac/bigdata/ticket/608 (LOG BlockingBuffer not progressing at INFO or lower level) +- http://sourceforge.net/apps/trac/bigdata/ticket/609 (bigdata-ganglia is required dependency for Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/611 (The code that processes SPARQL Update has a typo) +- http://sourceforge.net/apps/trac/bigdata/ticket/612 (Bigdata scale-up depends on zookeper) +- http://sourceforge.net/apps/trac/bigdata/ticket/613 (SPARQL UPDATE response inlines large DELETE or INSERT triple graphs) +- http://sourceforge.net/apps/trac/bigdata/ticket/614 (static join optimizer does not get ordering right when multiple tails share vars with ancestry) +- http://sourceforge.net/apps/trac/bigdata/ticket/615 (AST2BOpUtility wraps UNION with an unnecessary hash join) +- http://sourceforge.net/apps/trac/bigdata/ticket/616 (Row store read/update not isolated on Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/617 (Concurrent KB create fails with "No axioms defined?") +- http://sourceforge.net/apps/trac/bigdata/ticket/618 (DirectBufferPool.poolCapacity maximum of 2GB) +- http://sourceforge.net/apps/trac/bigdata/ticket/619 (RemoteRepository class should use application/x-www-form-urlencoded for large POST requests) +- http://sourceforge.net/apps/trac/bigdata/ticket/620 (UpdateServlet fails to parse MIMEType when doing conneg.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/626 (Expose performance counters for read-only indices) +- http://sourceforge.net/apps/trac/bigdata/ticket/627 (Environment variable override for NSS properties file) +- http://sourceforge.net/apps/trac/bigdata/ticket/628 (Create a bigdata-client jar for the NSS REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/631 (ClassCastException in SIDs mode query) +- http://sourceforge.net/apps/trac/bigdata/ticket/632 (NotMaterializedException when a SERVICE call needs variables that are provided as query input bindings) +- http://sourceforge.net/apps/trac/bigdata/ticket/633 (ClassCastException when binding non-uri values to a variable that occurs in predicate position) +- http://sourceforge.net/apps/trac/bigdata/ticket/638 (Change DEFAULT_MIN_RELEASE_AGE to 1ms) +- http://sourceforge.net/apps/trac/bigdata/ticket/640 (Conditionally rollback() BigdataSailConnection if dirty) +- http://sourceforge.net/apps/trac/bigdata/ticket/642 (Property paths do not work inside of exists/not exists filters) +- http://sourceforge.net/apps/trac/bigdata/ticket/643 (Add web.xml parameters to lock down public NSS end points) +- http://sourceforge.net/apps/trac/bigdata/ticket/644 (Bigdata2Sesame2BindingSetIterator can fail to notice asynchronous close()) +- http://sourceforge.net/apps/trac/bigdata/ticket/650 (Can not POST RDF to a graph using REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/654 (Rare AssertionError in WriteCache.clearAddrMap()) +- http://sourceforge.net/apps/trac/bigdata/ticket/655 (SPARQL REGEX operator does not perform case-folding correctly for Unicode data) +- http://sourceforge.net/apps/trac/bigdata/ticket/656 (InFactory bug when IN args consist of a single literal) +- http://sourceforge.net/apps/trac/bigdata/ticket/647 (SIDs mode creates unnecessary hash join for GRAPH group patterns) +- http://sourceforge.net/apps/trac/bigdata/ticket/667 (Provide NanoSparqlServer initialization hook) +- http://sourceforge.net/apps/trac/bigdata/ticket/669 (Doubly nested subqueries yield no results with LIMIT) +- http://sourceforge.net/apps/trac/bigdata/ticket/675 (Flush indices in parallel during checkpoint to reduce IO latency) +- http://sourceforge.net/apps/trac/bigdata/ticket/682 (AtomicRowFilter UnsupportedOperationException) + +1.2.2: + +- http://sourceforge.net/apps/trac/bigdata/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/602 (RWStore does not discard logged deletes on reset()) +- http://sourceforge.net/apps/trac/bigdata/ticket/603 (Prepare critical maintenance release as branch of 1.2.1) + +1.2.1: + +- http://sourceforge.net/apps/trac/bigdata/ticket/533 (Review materialization for inline IVs) +- http://sourceforge.net/apps/trac/bigdata/ticket/539 (NotMaterializedException with REGEX and Vocab) +- http://sourceforge.net/apps/trac/bigdata/ticket/540 (SPARQL UPDATE using NSS via index.html) +- http://sourceforge.net/apps/trac/bigdata/ticket/541 (MemoryManaged backed Journal mode) +- http://sourceforge.net/apps/trac/bigdata/ticket/546 (Index cache for Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/549 (BTree can not be cast to Name2Addr (MemStore recycler)) +- http://sourceforge.net/apps/trac/bigdata/ticket/550 (NPE in Leaf.getKey() : root cause was user error) +- http://sourceforge.net/apps/trac/bigdata/ticket/558 (SPARQL INSERT not working in same request after INSERT DATA) +- http://sourceforge.net/apps/trac/bigdata/ticket/562 (Sub-select in INSERT cause NPE in UpdateExprBuilder) +- http://sourceforge.net/apps/trac/bigdata/ticket/563 (DISTINCT ORDER BY) +- http://sourceforge.net/apps/trac/bigdata/ticket/567 (Failure to set cached value on IV results in incorrect behavior for complex UPDATE operation) +- http://sourceforge.net/apps/trac/bigdata/ticket/568 (DELETE WHERE fails with Java AssertionError) +- http://sourceforge.net/apps/trac/bigdata/ticket/569 (LOAD-CREATE-LOAD using virgin journal fails with "Graph exists" exception) +- http://sourceforge.net/apps/trac/bigdata/ticket/571 (DELETE/INSERT WHERE handling of blank nodes) +- http://sourceforge.net/apps/trac/bigdata/ticket/573 (NullPointerException when attempting to INSERT DATA containing a blank node) + +1.2.0: (*) + +- http://sourceforge.net/apps/trac/bigdata/ticket/92 (Monitoring webapp) +- http://sourceforge.net/apps/trac/bigdata/ticket/267 (Support evaluation of 3rd party operators) +- http://sourceforge.net/apps/trac/bigdata/ticket/337 (Compact and efficient movement of binding sets between nodes.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/433 (Cluster leaks threads under read-only index operations: DGC thread leak) +- http://sourceforge.net/apps/trac/bigdata/ticket/437 (Thread-local cache combined with unbounded thread pools causes effective memory leak: termCache memory leak & thread-local buffers) +- http://sourceforge.net/apps/trac/bigdata/ticket/438 (KeyBeforePartitionException on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/439 (Class loader problem) +- http://sourceforge.net/apps/trac/bigdata/ticket/441 (Ganglia integration) +- http://sourceforge.net/apps/trac/bigdata/ticket/443 (Logger for RWStore transaction service and recycler) +- http://sourceforge.net/apps/trac/bigdata/ticket/444 (SPARQL query can fail to notice when IRunningQuery.isDone() on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/445 (RWStore does not track tx release correctly) +- http://sourceforge.net/apps/trac/bigdata/ticket/446 (HTTP Repostory broken with bigdata 1.1.0) +- http://sourceforge.net/apps/trac/bigdata/ticket/448 (SPARQL 1.1 UPDATE) +- http://sourceforge.net/apps/trac/bigdata/ticket/449 (SPARQL 1.1 Federation extension) +- http://sourceforge.net/apps/trac/bigdata/ticket/451 (Serialization error in SIDs mode on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/454 (Global Row Store Read on Cluster uses Tx) +- http://sourceforge.net/apps/trac/bigdata/ticket/456 (IExtension implementations do point lookups on lexicon) +- http://sourceforge.net/apps/trac/bigdata/ticket/457 ("No such index" on cluster under concurrent query workload) +- http://sourceforge.net/apps/trac/bigdata/ticket/458 (Java level deadlock in DS) +- http://sourceforge.net/apps/trac/bigdata/ticket/460 (Uncaught interrupt resolving RDF terms) +- http://sourceforge.net/apps/trac/bigdata/ticket/461 (KeyAfterPartitionException / KeyBeforePartitionException on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/463 (NoSuchVocabularyItem with LUBMVocabulary for DerivedNumericsExtension) +- http://sourceforge.net/apps/trac/bigdata/ticket/464 (Query statistics do not update correctly on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/465 (Too many GRS reads on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/469 (Sail does not flush assertion buffers before query) +- http://sourceforge.net/apps/trac/bigdata/ticket/472 (acceptTaskService pool size on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/475 (Optimize serialization for query messages on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/476 (Test suite for writeCheckpoint() and recycling for BTree/HTree) +- http://sourceforge.net/apps/trac/bigdata/ticket/478 (Cluster does not map input solution(s) across shards) +- http://sourceforge.net/apps/trac/bigdata/ticket/480 (Error releasing deferred frees using 1.0.6 against a 1.0.4 journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/481 (PhysicalAddressResolutionException against 1.0.6) +- http://sourceforge.net/apps/trac/bigdata/ticket/482 (RWStore reset() should be thread-safe for concurrent readers) +- http://sourceforge.net/apps/trac/bigdata/ticket/484 (Java API for NanoSparqlServer REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/491 (AbstractTripleStore.destroy() does not clear the locator cache) +- http://sourceforge.net/apps/trac/bigdata/ticket/492 (Empty chunk in ThickChunkMessage (cluster)) +- http://sourceforge.net/apps/trac/bigdata/ticket/493 (Virtual Graphs) +- 
http://sourceforge.net/apps/trac/bigdata/ticket/496 (Sesame 2.6.3) +- http://sourceforge.net/apps/trac/bigdata/ticket/497 (Implement STRBEFORE, STRAFTER, and REPLACE) +- http://sourceforge.net/apps/trac/bigdata/ticket/498 (Bring bigdata RDF/XML parser up to openrdf 2.6.3.) +- http://sourceforge.net/apps/trac/bigdata/ticket/500 (SPARQL 1.1 Service Description) +- http://www.openrdf.org/issues/browse/SES-884 (Aggregation with an solution set as input should produce an empty solution as output) +- http://www.openrdf.org/issues/browse/SES-862 (Incorrect error handling for SPARQL aggregation; fix in 2.6.1) +- http://www.openrdf.org/issues/browse/SES-873 (Order the same Blank Nodes together in ORDER BY) +- http://sourceforge.net/apps/trac/bigdata/ticket/501 (SPARQL 1.1 BINDINGS are ignored) +- http://sourceforge.net/apps/trac/bigdata/ticket/503 (Bigdata2Sesame2BindingSetIterator throws QueryEvaluationException were it should throw NoSuchElementException) +- http://sourceforge.net/apps/trac/bigdata/ticket/504 (UNION with Empty Group Pattern) +- http://sourceforge.net/apps/trac/bigdata/ticket/505 (Exception when using SPARQL sort & statement identifiers) +- http://sourceforge.net/apps/trac/bigdata/ticket/506 (Load, closure and query performance in 1.1.x versus 1.0.x) +- http://sourceforge.net/apps/trac/bigdata/ticket/508 (LIMIT causes hash join utility to log errors) +- http://sourceforge.net/apps/trac/bigdata/ticket/513 (Expose the LexiconConfiguration to Function BOPs) +- http://sourceforge.net/apps/trac/bigdata/ticket/515 (Query with two "FILTER NOT EXISTS" expressions returns no results) +- http://sourceforge.net/apps/trac/bigdata/ticket/516 (REGEXBOp should cache the Pattern when it is a constant) +- http://sourceforge.net/apps/trac/bigdata/ticket/517 (Java 7 Compiler Compatibility) +- http://sourceforge.net/apps/trac/bigdata/ticket/518 (Review function bop subclass hierarchy, optimize datatype bop, etc.) +- http://sourceforge.net/apps/trac/bigdata/ticket/520 (CONSTRUCT WHERE shortcut) +- http://sourceforge.net/apps/trac/bigdata/ticket/521 (Incremental materialization of Tuple and Graph query results) +- http://sourceforge.net/apps/trac/bigdata/ticket/525 (Modify the IChangeLog interface to support multiple agents) +- http://sourceforge.net/apps/trac/bigdata/ticket/527 (Expose timestamp of LexiconRelation to function bops) +- http://sourceforge.net/apps/trac/bigdata/ticket/532 (ClassCastException during hash join (can not be cast to TermId)) +- http://sourceforge.net/apps/trac/bigdata/ticket/533 (Review materialization for inline IVs) +- http://sourceforge.net/apps/trac/bigdata/ticket/534 (BSBM BI Q5 error using MERGE JOIN) + +1.1.0 (*) + + - http://sourceforge.net/apps/trac/bigdata/ticket/23 (Lexicon joins) + - http://sourceforge.net/apps/trac/bigdata/ticket/109 (Store large literals as "blobs") + - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.) + - http://sourceforge.net/apps/trac/bigdata/ticket/203 (Implement an persistence capable hash table to support analytic query) + - http://sourceforge.net/apps/trac/bigdata/ticket/209 (AccessPath should visit binding sets rather than elements for high level query.) + - http://sourceforge.net/apps/trac/bigdata/ticket/227 (SliceOp appears to be necessary when operator plan should suffice without) + - http://sourceforge.net/apps/trac/bigdata/ticket/232 (Bottom-up evaluation semantics). 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/246 (Derived xsd numeric data types must be inlined as extension types.) + - http://sourceforge.net/apps/trac/bigdata/ticket/254 (Revisit pruning of intermediate variable bindings during query execution) + - http://sourceforge.net/apps/trac/bigdata/ticket/261 (Lift conditions out of subqueries.) + - http://sourceforge.net/apps/trac/bigdata/ticket/300 (Native ORDER BY) + - http://sourceforge.net/apps/trac/bigdata/ticket/324 (Inline predeclared URIs and namespaces in 2-3 bytes) + - http://sourceforge.net/apps/trac/bigdata/ticket/330 (NanoSparqlServer does not locate "html" resources when run from jar) + - http://sourceforge.net/apps/trac/bigdata/ticket/334 (Support inlining of unicode data in the statement indices.) + - http://sourceforge.net/apps/trac/bigdata/ticket/364 (Scalable default graph evaluation) + - http://sourceforge.net/apps/trac/bigdata/ticket/368 (Prune variable bindings during query evaluation) + - http://sourceforge.net/apps/trac/bigdata/ticket/370 (Direct translation of openrdf AST to bigdata AST) + - http://sourceforge.net/apps/trac/bigdata/ticket/373 (Fix StrBOp and other IValueExpressions) + - http://sourceforge.net/apps/trac/bigdata/ticket/377 (Optimize OPTIONALs with multiple statement patterns.) + - http://sourceforge.net/apps/trac/bigdata/ticket/380 (Native SPARQL evaluation on cluster) + - http://sourceforge.net/apps/trac/bigdata/ticket/387 (Cluster does not compute closure) + - http://sourceforge.net/apps/trac/bigdata/ticket/395 (HTree hash join performance) + - http://sourceforge.net/apps/trac/bigdata/ticket/401 (inline xsd:unsigned datatypes) + - http://sourceforge.net/apps/trac/bigdata/ticket/408 (xsd:string cast fails for non-numeric data) + - http://sourceforge.net/apps/trac/bigdata/ticket/421 (New query hints model.) + - http://sourceforge.net/apps/trac/bigdata/ticket/431 (Use of read-only tx per query defeats cache on cluster) + +1.0.3 + + - http://sourceforge.net/apps/trac/bigdata/ticket/217 (BTreeCounters does not track bytes released) + - http://sourceforge.net/apps/trac/bigdata/ticket/269 (Refactor performance counters using accessor interface) + - http://sourceforge.net/apps/trac/bigdata/ticket/329 (B+Tree should delete bloom filter when it is disabled.) 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/372 (RWStore does not prune the CommitRecordIndex) + - http://sourceforge.net/apps/trac/bigdata/ticket/375 (Persistent memory leaks (RWStore/DISK)) + - http://sourceforge.net/apps/trac/bigdata/ticket/385 (FastRDFValueCoder2: ArrayIndexOutOfBoundsException) + - http://sourceforge.net/apps/trac/bigdata/ticket/391 (Release age advanced on WORM mode journal) + - http://sourceforge.net/apps/trac/bigdata/ticket/392 (Add a DELETE by access path method to the NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/393 (Add "context-uri" request parameter to specify the default context for INSERT in the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/394 (log4j configuration error message in WAR deployment) + - http://sourceforge.net/apps/trac/bigdata/ticket/399 (Add a fast range count method to the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/422 (Support temp triple store wrapped by a BigdataSail) + - http://sourceforge.net/apps/trac/bigdata/ticket/424 (NQuads support for NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/425 (Bug fix to DEFAULT_RDF_FORMAT for bulk data loader in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/426 (Support either lockfile (procmail) and dotlockfile (liblockfile1) in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/427 (BigdataSail#getReadOnlyConnection() race condition with concurrent commit) + - http://sourceforge.net/apps/trac/bigdata/ticket/435 (Address is 0L) + - http://sourceforge.net/apps/trac/bigdata/ticket/436 (TestMROWTransactions failure in CI) + +1.0.2 + + - http://sourceforge.net/apps/trac/bigdata/ticket/32 (Query time expansion of (foo rdf:type rdfs:Resource) drags in SPORelation for scale-out.) + - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.) + - http://sourceforge.net/apps/trac/bigdata/ticket/356 (Query not terminated by error.) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/361 (IRunningQuery not closed promptly.) + - http://sourceforge.net/apps/trac/bigdata/ticket/371 (DataLoader fails to load resources available from the classpath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/376 (Support for the streaming of bigdata IBindingSets into a sparql query.) + - http://sourceforge.net/apps/trac/bigdata/ticket/378 (ClosedByInterruptException during heavy query mix.) + - http://sourceforge.net/apps/trac/bigdata/ticket/379 (NotSerializableException for SPOAccessPath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/382 (Change dependencies to Apache River 2.2.0) + +1.0.1 (*) + + - http://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema names in the sparse row store). + - http://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should use more bits for scale-out). + - http://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized performance counter collection classes). + - http://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() must return new instance when DummyIV is used). + - http://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits Journal to 2B distinct RDF Values per triple/quad store instance). 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/351 (SPO not Serializable exception in SIDS mode (scale-out)). + - http://sourceforge.net/apps/trac/bigdata/ticket/352 (ClassCastException when querying with binding-values that are not known to the database). + - http://sourceforge.net/apps/trac/bigdata/ticket/353 (UnsupportedOperatorException for some SPARQL queries). + - http://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when comparing with non materialized value). + - http://sourceforge.net/apps/trac/bigdata/ticket/357 (RWStore reports "FixedAllocator returning null address, with freeBits".) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/362 (log4j - slf4j bridge.) + +For more information about bigdata(R), please see the following links: + +[1] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page +[2] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted +[3] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap +[4] http://www.bigdata.com/bigdata/docs/api/ +[5] http://sourceforge.net/projects/bigdata/ +[6] http://www.bigdata.com/blog +[7] http://www.systap.com/bigdata.htm +[8] http://sourceforge.net/projects/bigdata/files/bigdata/ +[9] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration + +About bigdata: + +Bigdata(R) is a horizontally-scaled, general purpose storage and computing fabric for ordered data (B+Trees), designed to operate on either a single server or a cluster of commodity hardware. Bigdata(R) uses dynamically partitioned key-range shards in order to remove any realistic scaling limits - in principle, bigdata(R) may be deployed on 10s, 100s, or even thousands of machines and new capacity may be added incrementally without requiring the full reload of all data. The bigdata(R) RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), and datum level provenance. Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-23 12:46:59 UTC (rev 7587) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-24 14:38:01 UTC (rev 7588) @@ -31,13 +31,6 @@ ## export JVM_OPTS="-server -Xmx4G -XX:MaxDirectMemorySize=3000m" -# Apache River -export GROUPS="$FEDNAME" -export LOCATORS="jini://bigdata15/,jini://bigdata16/,jini://bigdata17/" - -# Apache Zookeeper -export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081"; - ## # HAJournalServer configuration parameter overrides (see HAJournal.config). # @@ -62,6 +55,15 @@ if [ -z "${FED_DIR}" ]; then export FED_DIR=$INSTALL_DIR fi +if [ -z "${GROUPS}" ]; then + export GROUPS="$FEDNAME" +fi +if [ -z "${LOCATORS}" ]; then + export LOCATORS="jini://bigdata15/,jini://bigdata16/,jini://bigdata17/" +fi +if [ -z "${ZK_SERVERS}" ]; then + export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081"; +fi # All of these have defaults. #export REPLICATION_FACTOR=3 This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
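With this change the Apache River and ZooKeeper settings become plain environment overrides: anything left unset falls back to the developer defaults (bigdata15/16/17) shown in the diff. A minimal sketch of a production-style override follows; the host names and the standard ZooKeeper client port are placeholders, not values from the source.

   export FEDNAME=myFederation
   export FED_DIR=/var/bigdata/${FEDNAME}      # otherwise defaults to INSTALL_DIR
   export GROUPS="$FEDNAME"                    # this is also the default
   export LOCATORS="jini://node1/,jini://node2/,jini://node3/"
   export ZK_SERVERS="node1:2181,node2:2181,node3:2181"
   bin/startHAServices                         # or start via the bigdataHA wrapper script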
From: <tho...@us...> - 2013-11-23 12:47:05
|
Revision: 7587 http://bigdata.svn.sourceforge.net/bigdata/?rev=7587&view=rev Author: thompsonbry Date: 2013-11-23 12:46:59 +0000 (Sat, 23 Nov 2013) Log Message: ----------- Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/TestMonitoring.sh Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/TestMonitoring.sh =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/TestMonitoring.sh (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/TestMonitoring.sh 2013-11-23 12:46:59 UTC (rev 7587) @@ -0,0 +1,12 @@ +#!/bin/bash + +# Setup the environment. +source src/resources/HAJournal/HAJournal.env + +java\ + ${JAVAOPTS}\ + -cp ${CLASSPATH}\ + -Djava.security.policy=${POLICY_FILE}\ + -Dlog4j.configuration=${LOG4J_CONFIG}\ + -Djava.util.logging.config.file=${LOGGING_CONFIG}\ + com.bigdata.counters.AbstractStatisticsCollector 1 10 This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-23 12:43:49
|
Revision: 7586 http://bigdata.svn.sourceforge.net/bigdata/?rev=7586&view=rev Author: thompsonbry Date: 2013-11-23 12:43:40 +0000 (Sat, 23 Nov 2013) Log Message: ----------- Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/NICUtil.sh Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/NICUtil.sh =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/NICUtil.sh (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/NICUtil.sh 2013-11-23 12:43:40 UTC (rev 7586) @@ -0,0 +1,13 @@ +#!/bin/bash + +# Setup the environment. +source src/resources/HAJournal/HAJournal.env + +# +java\ + ${JAVAOPTS}\ + -cp ${CLASSPATH}\ + -Djava.security.policy=${POLICY_FILE}\ + -Dlog4j.configuration=${LOG4J_CONFIG}\ + -Djava.util.logging.config.file=${LOGGING_CONFIG}\ + com.bigdata.util.config.NicUtil This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-22 21:03:09
|
Revision: 7585 http://bigdata.svn.sourceforge.net/bigdata/?rev=7585&view=rev Author: thompsonbry Date: 2013-11-22 21:03:03 +0000 (Fri, 22 Nov 2013) Log Message: ----------- I have added an init.d style script: bigdataHA (start|stop|status|restart) This script relies on a bigdataHAEnv that MUST define the following variables to specify the location of the installed scripts. These variables SHOULD use absolute path names. binDir pidFile This script could be used by an rpm or other installer to install the HA replication cluster as an init.d style service on a linux platform. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-22 19:17:14 UTC (rev 7584) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-22 21:03:03 UTC (rev 7585) @@ -1051,6 +1051,13 @@ todir="${dist.bin}" /> <chmod file="${dist.bin}/startHAServices" perm="755" /> + <copy file="${src.resources}/bin/bigdataHA" + todir="${dist.bin}" /> + <chmod file="${dist.bin}/bigdataHA" perm="755" /> + + <copy file="${src.resources}/bin/bigdataHAEnv" + todir="${dist.bin}" /> + <copy file="${src.resources}/bin/config/browser.config" todir="${dist.bin.config}" /> <copy file="${src.resources}/bin/config/reggie.config" Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA 2013-11-22 21:03:03 UTC (rev 7585) @@ -0,0 +1,131 @@ +#!/bin/bash + +# init.d style script for bigdata HA services. The script can be used +# to 'start' or 'stop' services. +# +# Environment: +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Misc. +# +# See http://tldp.org/LDP/abs/html/index.html +# +# Note: Blank lines are significant in shell scripts. +# +# Note: Children must do "exit 0" to indicate success. +# +# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix + +# Source function library (just used for 'action'). If you don't have this +# it SHOULD automatically use the inline definition for "action()". +if [ -f "/etc/init.d/functions" ]; then + . /etc/init.d/functions +else +# Run some action. Log its output. No fancy colors. First argument is the +# label for the log file. Remaining arguments are the command to execute +# and its arguments, if any. + action() { + local STRING rc + STRING=$1 + echo -n "$STRING " + shift + "$@" && echo -n "[OK]" || echo -n "[FAILED]" + rc=$? + echo + return $rc + } +fi + +# Where the scripts live. +cd `dirname $0` + +## +# Highly Recommended OS Tuning. +## + +# Do not swap out applications while there is free memory. +#/sbin/sysctl -w vm.swappiness=0 + +# Setup the environment. +source ./bigdataHAEnv + +if [ -z "$binDir" ]; then + echo $"$0 : environment not setup: binDir is undefined." + exit 1; +fi +if [ -z "$pidFile" ]; then + echo $"$0 : environment not setup: pidFile is undefined" + exit 1; +fi + +# +# See how we were called. +# +case "$1" in + start) +# +# Start the ServiceStarter and child services if not running. 
+# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + fi + fi + if [ ! -f "$pidFile" ]; then + action $"`date` : `hostname` : bringing up services: " $binDir/startHAServices + else + echo $"`date` : `hostname` : running as $pid" + fi + ;; + stop) +# +# Stop the ServiceStarter and all child services. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + else + action $"`date` : `hostname` : bringing down services: " kill $pid + rm -f "$pidFile" + fi + fi + ;; + status) +# +# Report status for the ServicesManager (up or down). +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then + echo $"`date` : `hostname` : process died? pid=$pid." + else + echo $"`date` : `hostname` : running as $pid." + fi + else + echo $"`date` : `hostname` : not running." + fi + ;; + restart) + $0 stop + $0 start + ;; + *) +# +# Usage +# + echo $"Usage: $0 {start|stop|status|restart}" + exit 1 +esac + +exit 0 Property changes on: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA ___________________________________________________________________ Added: svn:executable + * Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv 2013-11-22 21:03:03 UTC (rev 7585) @@ -0,0 +1,11 @@ +# Environment for bigdata HA services. +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Note: You MUST provide the location of the executable scripts and the +# pid file that is written by $binDir/startHAServices. These SHOULD be +# absolute path names. + +#binDir= +#pidFile= This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
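As a usage illustration only (the install location below is hypothetical), bigdataHAEnv might be filled in as follows, after which the service is driven in the usual init.d style on each node:

    # /opt/bigdata/bin/bigdataHAEnv -- hypothetical install under /opt/bigdata
    binDir=/opt/bigdata/bin             # directory containing startHAServices
    pidFile=/opt/bigdata/var/lock/pid   # pid file written by startHAServices

    # Then, on each node:
    /opt/bigdata/bin/bigdataHA start
    /opt/bigdata/bin/bigdataHA status
    /opt/bigdata/bin/bigdataHA stop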
From: <tho...@us...> - 2013-11-22 19:17:20
|
Revision: 7584 http://bigdata.svn.sourceforge.net/bigdata/?rev=7584&view=rev Author: thompsonbry Date: 2013-11-22 19:17:14 +0000 (Fri, 22 Nov 2013) Log Message: ----------- Modified the HAJournal.config script to use a new getProperty() method that returns the default if the value for the property is an empty string (after trimming whitespace). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/jini/util/ConfigMath.java branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/jini/util/ConfigMath.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/jini/util/ConfigMath.java 2013-11-22 19:09:13 UTC (rev 7583) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/jini/util/ConfigMath.java 2013-11-22 19:17:14 UTC (rev 7584) @@ -427,4 +427,30 @@ } + /** + * Return the value for the named property -or- the default value if the + * property name is not defined or evaluates to an empty string after + * trimming any whitespace. + * + * @param key + * The property name. + * @param def + * The default value. + * @return The value for the named property -or- the default value if the + * property name is not defined or evaluates to an empty string + * after trimming any whitespace. + */ + public static String getProperty(final String key, final String def) { + + String tmp = System.getProperty(key); + + if (tmp == null || tmp.trim().length() == 0) { + + return def; + } + + return tmp; + + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2013-11-22 19:09:13 UTC (rev 7583) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2013-11-22 19:17:14 UTC (rev 7584) @@ -60,25 +60,25 @@ /* The name of the federation (also constrains the discovery groups and * provides a zk namespace). This can be overridden from the environment. */ - private static fedname = System.getProperty("FEDNAME","benchmark"); + private static fedname = ConfigMath.getProperty("FEDNAME","benchmark"); // NanoSparqlServer (http) port. - private static nssPort = Integer.parseInt(System.getProperty("NSS_PORT","8090")); + private static nssPort = Integer.parseInt(ConfigMath.getProperty("NSS_PORT","8090")); // write replication pipeline port (listener). - private static haPort = Integer.parseInt(System.getProperty("HA_PORT","9090")); + private static haPort = Integer.parseInt(ConfigMath.getProperty("HA_PORT","9090")); // The #of services in the write pipeline. - private static replicationFactor = Integer.parseInt(System.getProperty("REPLICATION_FACTOR","3")); + private static replicationFactor = Integer.parseInt(ConfigMath.getProperty("REPLICATION_FACTOR","3")); // The logical service identifier shared by all members of the quorum. - private static logicalServiceId = System.getProperty("LOGICAL_SERVICE_ID","HAJournal-1"); + private static logicalServiceId = ConfigMath.getProperty("LOGICAL_SERVICE_ID","HAJournal-1"); // The ServiceID for *this* service -or- null to assign it dynamically. private static serviceId = null; // The base directory for the federation. 
- private static fedDir = new File(System.getProperty("FED_DIR","."),fedname); + private static fedDir = new File(ConfigMath.getProperty("FED_DIR","."),fedname); // The service directory (if serviceId is null, then you must override). // private static serviceDir = new File(fedname,""+serviceId); @@ -116,7 +116,7 @@ //static private groups = LookupDiscovery.ALL_GROUPS; // unicast discovery or multiple setups, MUST specify groups. - static private groups = ConfigMath.getGroups(System.getProperty("GROUPS",bigdata.fedname)); + static private groups = ConfigMath.getGroups(ConfigMath.getProperty("GROUPS",bigdata.fedname)); /** * One or more unicast URIs of the form <code>jini://host/</code> @@ -126,7 +126,7 @@ * discovery <strong>and</strong> you have specified the groups as * LookupDiscovery.ALL_GROUPS (a <code>null</code>). */ - static private locators = ConfigMath.getLocators(System.getProperty("LOCATORS","jini://bigdata15/,jini://bigdata16/,jini://bigdata17/")); + static private locators = ConfigMath.getLocators(ConfigMath.getProperty("LOCATORS","jini://bigdata15/,jini://bigdata16/,jini://bigdata17/")); /** * A common point to set the Zookeeper client's requested @@ -232,7 +232,7 @@ * the CLIENT port for the zookeeper server instance. */ // ensemble - servers = System.getProperty("ZK_SERVERS","bigdata15:2081,bigdata16:2081,bigdata17:2081"); + servers = ConfigMath.getProperty("ZK_SERVERS","bigdata15:2081,bigdata16:2081,bigdata17:2081"); /* Session timeout (optional). */ sessionTimeout = bigdata.sessionTimeout; @@ -347,19 +347,19 @@ // performance counters for internal queues. new NV(Journal.Options.COLLECT_QUEUE_STATISTICS, - System.getProperty("COLLECT_QUEUE_STATISTICS","false")), + ConfigMath.getProperty("COLLECT_QUEUE_STATISTICS","false")), // platform and process performance counters (requires external s/w on some platforms) new NV(Journal.Options.COLLECT_PLATFORM_STATISTICS, - System.getProperty("COLLECT_PLATFORM_STATISTICS","false")), + ConfigMath.getProperty("COLLECT_PLATFORM_STATISTICS","false")), // uses bigdata-ganglia module to report service metrics to ganglia. new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT, - System.getProperty("GANGLIA_REPORT","false")), + ConfigMath.getProperty("GANGLIA_REPORT","false")), // uses bigdata-ganglia module to build internal model of cluster load. new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN, - System.getProperty("GANGLIA_LISTENER","false")), + ConfigMath.getProperty("GANGLIA_LISTENER","false")), }, bigdata.kb); @@ -374,7 +374,7 @@ create = true; - queryThreadPoolSize = Integer.parseInt(System.getProperty("QUERY_THREAD_POOL_SIZE","16")); + queryThreadPoolSize = Integer.parseInt(ConfigMath.getProperty("QUERY_THREAD_POOL_SIZE","16")); describeEachNamedGraph = true; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
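The practical difference between ConfigMath.getProperty() and System.getProperty() is easiest to see with a blank value, which is exactly what the launch script produces for any override that is left unset. A minimal sketch (the property name is illustrative):

    public class GetPropertyDemo {
        public static void main(String[] args) {
            // Simulate "-DNSS_PORT=" as passed by the launch script when the
            // corresponding environment variable is not set.
            System.setProperty("NSS_PORT", "");
            // Plain JDK lookup returns the empty string, and Integer.parseInt("")
            // would then throw a NumberFormatException.
            System.out.println("[" + System.getProperty("NSS_PORT", "8090") + "]");   // []
            // The new helper treats a blank value as "not defined" and falls back.
            System.out.println("[" + com.bigdata.jini.util.ConfigMath.getProperty(
                    "NSS_PORT", "8090") + "]");                                       // [8090]
        }
    }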
From: <tho...@us...> - 2013-11-22 19:09:19
|
Revision: 7583 http://bigdata.svn.sourceforge.net/bigdata/?rev=7583&view=rev Author: thompsonbry Date: 2013-11-22 19:09:13 +0000 (Fri, 22 Nov 2013) Log Message: ----------- Failed to pass through the HA_OPTs.... Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-22 15:49:49 UTC (rev 7582) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-22 19:09:13 UTC (rev 7583) @@ -74,7 +74,7 @@ #export GANGLIA_LISTENER= #export SYSSTAT_DIR= -export HAOPTS="\ +export HA_OPTS="\ -DFEDNAME=${FEDNAME}\ -DLOGICAL_SERVICE_ID=${LOGICAL_SERVICE_ID}\ -DFED_DIR=${FED_DIR}\ @@ -107,6 +107,7 @@ export JAVA_OPTS="\ ${JVM_OPTS}\ + ${HA_OPTS}\ -Djava.security.policy=${POLICY_FILE}\ -Djava.util.logging.config.file=${LOGGING_CONFIG}\ -Dlog4j.configuration=${LOG4J_CONFIG}\ @@ -115,7 +116,6 @@ -DCONFIG_DIR=${CONFIG_DIR}\ -DPOLICY_FILE=${POLICY_FILE}\ -DJINI_CLASS_SERVER_PORT=${JINI_CLASS_SERVER_PORT}\ - -DFEDNAME=${FEDNAME}\ -DHAJOURNAL_CLASSPATH=${HAJOURNAL_CLASSPATH}\ " This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-22 15:49:56
|
Revision: 7582 http://bigdata.svn.sourceforge.net/bigdata/?rev=7582&view=rev Author: thompsonbry Date: 2013-11-22 15:49:49 +0000 (Fri, 22 Nov 2013) Log Message: ----------- Added ability to set the river locators and groups and the zookeeper servers from the environment outside of the startHAServices script. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2013-11-22 15:43:38 UTC (rev 7581) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2013-11-22 15:49:49 UTC (rev 7582) @@ -116,7 +116,7 @@ //static private groups = LookupDiscovery.ALL_GROUPS; // unicast discovery or multiple setups, MUST specify groups. - static private groups = new String[]{bigdata.fedname}; + static private groups = ConfigMath.getGroups(System.getProperty("GROUPS",bigdata.fedname)); /** * One or more unicast URIs of the form <code>jini://host/</code> @@ -126,15 +126,8 @@ * discovery <strong>and</strong> you have specified the groups as * LookupDiscovery.ALL_GROUPS (a <code>null</code>). */ - static private locators = new LookupLocator[] { - - // runs jini on one or more hosts using unicast locators. - new LookupLocator("jini://bigdata15/"), - new LookupLocator("jini://bigdata16/"), - new LookupLocator("jini://bigdata17/"), - - }; - + static private locators = ConfigMath.getLocators(System.getProperty("LOCATORS","jini://bigdata15/,jini://bigdata16/,jini://bigdata17/")); + /** * A common point to set the Zookeeper client's requested * sessionTimeout and the jini lease timeout. The default lease Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-22 15:43:38 UTC (rev 7581) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-22 15:49:49 UTC (rev 7582) @@ -31,6 +31,13 @@ ## export JVM_OPTS="-server -Xmx4G -XX:MaxDirectMemorySize=3000m" +# Apache River +export GROUPS="$FEDNAME" +export LOCATORS="jini://bigdata15/,jini://bigdata16/,jini://bigdata17/" + +# Apache Zookeeper +export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081"; + ## # HAJournalServer configuration parameter overrides (see HAJournal.config). # @@ -43,12 +50,23 @@ # Note: Many of these properties have defaults. ## -export FEDNAME=installTest -export LOGICAL_SERVICE_ID=HAJournalServer-1 -export FED_DIR=$INSTALL_DIR -export REPLICATION_FACTOR=3 -export HA_PORT=9090 -export NSS_PORT=8080 +# Conditional defaults for required properties. These can (and should) be +# overridden from the environment outside of this script. The defaults are +# not suitable for deployment. +if [ -z "${FEDNAME}" ]; then + export FEDNAME=installTest +fi +if [ -z "${LOGICAL_SERVICE_ID}" ]; then + export LOGICAL_SERVICE_ID=HAJournalServer-1 +fi +if [ -z "${FED_DIR}" ]; then + export FED_DIR=$INSTALL_DIR +fi + +# All of these have defaults. 
+#export REPLICATION_FACTOR=3 +#export HA_PORT=9090 +#export NSS_PORT=8080 #export QUERY_THREAD_POOL_SIZE= #export COLLECT_QUEUE_STATISTICS= #export COLLECT_PLATFORM_STATISTICS= @@ -56,11 +74,6 @@ #export GANGLIA_LISTENER= #export SYSSTAT_DIR= -#export GROUPS= -#export LOCATORS= - -export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081"; - export HAOPTS="\ -DFEDNAME=${FEDNAME}\ -DLOGICAL_SERVICE_ID=${LOGICAL_SERVICE_ID}\ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-22 15:43:48
|
Revision: 7581 http://bigdata.svn.sourceforge.net/bigdata/?rev=7581&view=rev Author: thompsonbry Date: 2013-11-22 15:43:38 +0000 (Fri, 22 Nov 2013) Log Message: ----------- Added ability to parse a comma delimited list of URLs as a LookupLocator[]. Added ability to parse a comma delimited list of groups. Integrated into the test suite. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/jini/util/ConfigMath.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/TestAll.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/util/ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/util/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/util/TestConfigMath.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/jini/util/ConfigMath.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/jini/util/ConfigMath.java 2013-11-22 02:09:38 UTC (rev 7580) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/jini/util/ConfigMath.java 2013-11-22 15:43:38 UTC (rev 7581) @@ -28,9 +28,14 @@ package com.bigdata.jini.util; import java.io.File; +import java.net.MalformedURLException; import java.util.concurrent.TimeUnit; +import javax.net.SocketFactory; + import net.jini.config.Configuration; +import net.jini.core.discovery.LookupLocator; +import net.jini.discovery.LookupDiscovery; import com.sun.jini.config.ConfigUtil; @@ -331,5 +336,95 @@ return o != null; } - + + /** + * Parse a comma delimited list of zero or more unicast URIs of the form + * <code>jini://host/</code> or <code>jini://host:port/</code>. + * <p> + * This MAY be an empty array if you want to use multicast discovery + * <strong>and</strong> you have specified the groups as + * {@link LookupDiscovery#ALL_GROUPS} (a <code>null</code>). + * <p> + * Note: This method is intended for overrides expressed from scripts using + * environment variables where we need to parse an interpret the value + * rather than given the value directly in a {@link Configuration} file. As + * a consequence, you can not specify the optional {@link SocketFactory} for + * the {@link LookupLocator} with this method. + * + * @param locators + * The locators, expressed as a comma delimited list of URIs. + * + * @return An array of zero or more {@link LookupLocator}s. + * + * @throws MalformedURLException + * if any of the parse URLs is invalid. + * + * @throws IllegalArgumentException + * if the <i>locators</i> is <code>null</code>. + */ + public static LookupLocator[] getLocators(final String locators) + throws MalformedURLException { + + if (locators == null) + throw new IllegalArgumentException(); + + final String[] a = locators.split(","); + + final LookupLocator[] b = new LookupLocator[a.length]; + + if (a.length == 1 && a[0].trim().length() == 0) { + + return new LookupLocator[0]; + + } + + for (int i = 0; i < a.length; i++) { + + final String urlStr = a[i]; + + final LookupLocator locator = new LookupLocator(urlStr); + + b[i] = locator; + + } + + return b; + + } + + /** + * Return an array of zero or more groups -or- <code>null</code> if the + * given argument is either <code>null</code> or <code>"null"</code>. + * <p> + * Note: a <code>null</code> corresponds to + * {@link LookupDiscovery#ALL_GROUPS}. 
This option is only permissible when + * you have a single setup and are using multicast discovery. In all other + * cases, you need to specify the group(s). + * + * @param groups + * The groups, expressed as a comma delimited list or zero or + * more groups. + * + * @return A string array parsed out of that argument. + */ + public static String[] getGroups(final String groups) { + + if (groups == null) + return null; + + if (groups.trim().equals("null")) + return null; + + final String[] a = groups.split(","); + + if (a.length == 1 && a[0].trim().length() == 0) { + + return new String[0]; + + } + + return a; + + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/TestAll.java 2013-11-22 02:09:38 UTC (rev 7580) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/TestAll.java 2013-11-22 15:43:38 UTC (rev 7581) @@ -79,6 +79,9 @@ final TestSuite suite = new TestSuite("jini"); + // jini configuration helpers. + suite.addTest(com.bigdata.jini.util.TestAll.suite()); + // zookeeper client library (queues, locks, etc). suite.addTest(com.bigdata.zookeeper.TestAll.suite()); Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/util/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/util/TestAll.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/util/TestAll.java 2013-11-22 15:43:38 UTC (rev 7581) @@ -0,0 +1,62 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Jun 26, 2006 + */ +package com.bigdata.jini.util; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import com.bigdata.service.jini.AbstractServerTestCase; + +/** + * Aggregates tests in dependency order - see {@link AbstractServerTestCase} for + * <strong>required</strong> system properties in order to run this test suite. 
+ * + * @version $Id$ + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class TestAll extends TestCase { + + public TestAll() { + } + + public TestAll(String name) { + super(name); + } + + public static Test suite() { + + final TestSuite suite = new TestSuite(TestAll.class.getPackage() + .getName()); + + suite.addTestSuite(TestConfigMath.class); + + return suite; + + } + +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/util/TestConfigMath.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/util/TestConfigMath.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/jini/util/TestConfigMath.java 2013-11-22 15:43:38 UTC (rev 7581) @@ -0,0 +1,105 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/* + * Created on Jun 26, 2006 + */ +package com.bigdata.jini.util; + +import java.net.MalformedURLException; + +import junit.framework.TestCase2; +import net.jini.core.discovery.LookupLocator; + +public class TestConfigMath extends TestCase2 { + + public TestConfigMath() { + } + + public TestConfigMath(final String name) { + super(name); + } + + public void test_getLocators() throws MalformedURLException { + + assertSameArray( + new LookupLocator[] {// + new LookupLocator("jini://bigdata15/"),// + new LookupLocator("jini://bigdata16/"),// + new LookupLocator("jini://bigdata17/"),// + }, + ConfigMath + .getLocators("jini://bigdata15/,jini://bigdata16/,jini://bigdata17/")); + + } + + public void test_getLocators_empty() throws MalformedURLException { + + assertSameArray(new LookupLocator[] {// + }, ConfigMath.getLocators("")); + + } + + public void test_getLocators_null_arg() throws MalformedURLException { + + try { + + ConfigMath.getLocators(null/* locators */); + + fail("Expecting " + IllegalArgumentException.class); + + } catch (IllegalArgumentException ex) { + + // ignore expected exception + + } + + } + + public void test_getGroups1() throws MalformedURLException { + + assertSameArray(new String[] { "a" }, + ConfigMath.getGroups("a")); + + } + + public void test_getGroups3() throws MalformedURLException { + + assertSameArray(new String[] { "a", "b", "c" }, + ConfigMath.getGroups("a,b,c")); + + } + + public void test_getGroups_empty() { + + assertSameArray(new String[] {}, ConfigMath.getGroups("")); + + } + + public void test_getGroups_null_label() { + + assertEquals(null, ConfigMath.getGroups("null")); + + } + +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
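A short sketch of how the two new parsers behave, mirroring the unit tests above; the host names are placeholders:

    import java.net.MalformedURLException;
    import java.util.Arrays;
    import com.bigdata.jini.util.ConfigMath;
    import net.jini.core.discovery.LookupLocator;

    public class ParseOverridesDemo {
        public static void main(String[] args) throws MalformedURLException {
            // Comma-delimited unicast locators (placeholder hosts).
            final LookupLocator[] locators =
                    ConfigMath.getLocators("jini://node1/,jini://node2:4160/");
            for (LookupLocator l : locators)
                System.out.println(l.getHost() + ":" + l.getPort());
            // Comma-delimited groups; the literal "null" maps to ALL_GROUPS.
            System.out.println(Arrays.toString(ConfigMath.getGroups("a,b,c"))); // [a, b, c]
            System.out.println(ConfigMath.getGroups("null"));                   // null
        }
    }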
From: <tho...@us...> - 2013-11-22 02:09:44
|
Revision: 7580 http://bigdata.svn.sourceforge.net/bigdata/?rev=7580&view=rev Author: thompsonbry Date: 2013-11-22 02:09:38 +0000 (Fri, 22 Nov 2013) Log Message: ----------- missed this file in the previous commit. Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-22 02:09:38 UTC (rev 7580) @@ -0,0 +1,119 @@ +#!/bin/bash + +# Start the services and put the JVM in the background. All services will +# run in a single JVM. See Apache River com.sun.jini.start.ServiceStarter +# for more details. The services are configured in the accompanying +# startHAServices.config file. Specific configuration options for each +# service are defined in the documentation for that service. +# +# Note: One drawback with running each service in the same JVM is that the +# GC load of all services is combined and all services would be suspended +# at the same time by a Full GC pass. If this is a problem, then you can +# break out the river services (ClassServer and Reggie) into a separate +# ServiceStarter instance from the HAJournalServer. + +# The top-level of the installation. +pushd `dirname $0` > /dev/null;cd ..;INSTALL_DIR=`pwd`;popd > /dev/null + +# Setup the directory for the pid of the ServiceStarter process. +lockDir=${INSTALL_DIR}/var/lock +mkdir -p $lockDir +pidFile=$lockDir/pid + +## +# ServiceStarter JVM options. +# +# The ServiceStarter is launched as a JVM with the following JVM options. +# The other services (including the HAJournalServer) will run inside of +# this JVM. This is where you specify the size of the Java heap and the +# size of the direct memory heap (used for the write cache buffers and +# some related things). +## +export JVM_OPTS="-server -Xmx4G -XX:MaxDirectMemorySize=3000m" + +## +# HAJournalServer configuration parameter overrides (see HAJournal.config). +# +# The bigdata HAJournal.config file may be heavily parameterized through +# environment variables that get passed through into the JVM started by +# this script and are thus made available to the HAJournalServer when it +# interprets the contents of the HAJournal.config file. See HAJournal.config +# for the meaning of these environment variables. +# +# Note: Many of these properties have defaults. 
+## + +export FEDNAME=installTest +export LOGICAL_SERVICE_ID=HAJournalServer-1 +export FED_DIR=$INSTALL_DIR +export REPLICATION_FACTOR=3 +export HA_PORT=9090 +export NSS_PORT=8080 +#export QUERY_THREAD_POOL_SIZE= +#export COLLECT_QUEUE_STATISTICS= +#export COLLECT_PLATFORM_STATISTICS= +#export GANGLIA_REPORT= +#export GANGLIA_LISTENER= +#export SYSSTAT_DIR= + +#export GROUPS= +#export LOCATORS= + +export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081"; + +export HAOPTS="\ + -DFEDNAME=${FEDNAME}\ + -DLOGICAL_SERVICE_ID=${LOGICAL_SERVICE_ID}\ + -DFED_DIR=${FED_DIR}\ + -DREPLICATION_FACTOR=${REPLICATION_FACTOR}\ + -DHA_PORT=${HA_PORT}\ + -DNSS_PORT=${NSS_PORT}\ + -DQUERY_THREAD_POOL_SIZE=${QUERY_THREAD_POOL_SIZE}\ + -DCOLLECT_QUEUE_STATISTICS=${COLLECT_QUEUE_STATISTICS}\ + -DCOLLECT_PLATFORM_STATISTICS=${COLLECT_PLATFORM_STATISTICS}\ + -DGANGLIA_REPORT=${GANGLIA_REPORT}\ + -DSYSSTAT_DIR=${SYSSTAT_DIR}\ + -Dcom.bigdata.counters.linux.sysstat.path=${SYSSTAT_DIR}\ +" + +## +# ServiceStarter configuration parameters (see startHAServices.conf). +## + +export LIB_DIR=${INSTALL_DIR}/lib +export CONFIG_DIR=${INSTALL_DIR}/var/config +export JINI_CLASS_SERVER_PORT=8081 +export JINI_CONFIG=${CONFIG_DIR}/jini/startHAServices.config +export POLICY_FILE=${CONFIG_DIR}/policy/policy.all +export LOGGING_CONFIG=${CONFIG_DIR}/logging/logging.properties +export LOG4J_CONFIG=${CONFIG_DIR}/logging/log4jHA.properties + +# TODO Explicitly enumerate JARs so we can control order if necessary and +# deploy on OS without find and tr. +export HAJOURNAL_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'` + +export JAVA_OPTS="\ + ${JVM_OPTS}\ + -Djava.security.policy=${POLICY_FILE}\ + -Djava.util.logging.config.file=${LOGGING_CONFIG}\ + -Dlog4j.configuration=${LOG4J_CONFIG}\ + -DLIB_DIR=${INSTALL_DIR}/lib\ + -DLIBDL_DIR=${INSTALL_DIR}/lib-dl\ + -DCONFIG_DIR=${CONFIG_DIR}\ + -DPOLICY_FILE=${POLICY_FILE}\ + -DJINI_CLASS_SERVER_PORT=${JINI_CLASS_SERVER_PORT}\ + -DFEDNAME=${FEDNAME}\ + -DHAJOURNAL_CLASSPATH=${HAJOURNAL_CLASSPATH}\ +" + +cmd="java ${JAVA_OPTS} \ + -cp ${HAJOURNAL_CLASSPATH} \ + com.sun.jini.start.ServiceStarter \ + ${JINI_CONFIG}" +echo "Running: $cmd" +$cmd& +pid=$! +echo "PID=$pid" +echo "$pid">$pidFile + +# Note: To obtain the pid, do: read pid < "$pidFile" This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
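Per the closing note in the script, the recorded pid can be used to inspect or stop the ServiceStarter by hand. A small sketch, assuming the script was launched from the install directory:

    # Read the pid written by startHAServices and stop the ServiceStarter.
    read pid < var/lock/pid
    ps -p "$pid"       # is it still running?
    kill "$pid"        # bring down the services (what bigdataHA stop does)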
From: <tho...@us...> - 2013-11-22 02:07:02
|
Revision: 7579 http://bigdata.svn.sourceforge.net/bigdata/?rev=7579&view=rev Author: thompsonbry Date: 2013-11-22 02:06:54 +0000 (Fri, 22 Nov 2013) Log Message: ----------- Added a ServiceStarter based script for launching: - classserver - reggie - HAJournalServer Do "ant deploy-artifact". Untar the resulting archive somewhere to install. edit bin/startHAServices.config to customize. Then do bin/startHAServices to start. Repeat on each node that will run the HAJournalServer. Note: startHAServices saves the pid of the ServiceStarter process. That pid could be used to write an init.d style script to start/stop the services listed above on a given node. Note: You can also do "ant stage" and then edit the dist/bigdata/... files in order to customize a deployment. Then create a tarball from that custom configuration. This tarball can then be wrapped up as an rpm, etc. as desired. This does NOT start zookeeper. The AbstractServer.run() method is now invoked from within HAJournalServer<init>() in order to be compatible with the ServiceStarter. See #766 (AWS installer) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_3_0/build.xml branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.env branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4jHA.properties branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/startHAServices.config Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4j.properties Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java 2013-11-22 01:52:38 UTC (rev 7578) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java 2013-11-22 02:06:54 UTC (rev 7579) @@ -1816,7 +1816,13 @@ } /** - * Run the server (this should be invoked from <code>main</code>. + * Start the HAJournalServer and wait for it to terminate. + * <p> + * Note: This is invoked from within the constructor of the concrete service + * class. This ensures that all initialization of the service is complete + * and is compatible with the Apache River ServiceStarter (doing this in + * main() is not compatible since the ServiceStarter does not expect the + * service to implement Runnable). */ @Override public void run() { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-11-22 01:52:38 UTC (rev 7578) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-11-22 02:06:54 UTC (rev 7579) @@ -556,6 +556,17 @@ super(args, lifeCycle); + /* + * Start the HAJournalServer and wait for it to terminate. + * + * Note: This is invoked from within the constructor of the concrete + * service class. 
This ensures that all initialization of the service is + * complete and is compatible with the Apache River ServiceStarter + * (doing this in main() is not compatible since the ServiceStarter does + * not expect the service to implement Runnable). + */ + run(); + } /* @@ -4541,9 +4552,19 @@ final HAJournalServer server = new HAJournalServer(args, new FakeLifeCycle()); - // Wait for the HAJournalServer to terminate. - server.run(); + /* + * Note: The server.run() call was pushed into the constructor to be + * compatible with the ServiceStarter pattern. + */ +// // Wait for the HAJournalServer to terminate. +// server.run(); + /* + * Note: The System.exit() call here appears to be required for the + * timely release of allocated ports. Commenting out this line tends to + * cause startup failures in CI due to ports that are already (aka, + * "still") bound. + */ System.exit(0); } Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-22 01:52:38 UTC (rev 7578) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-22 02:06:54 UTC (rev 7579) @@ -1047,6 +1047,10 @@ todir="${dist.bin}" /> <chmod file="${dist.bin}/pstart" perm="755" /> + <copy file="${src.resources}/bin/startHAServices" + todir="${dist.bin}" /> + <chmod file="${dist.bin}/startHAServices" perm="755" /> + <copy file="${src.resources}/bin/config/browser.config" todir="${dist.bin.config}" /> <copy file="${src.resources}/bin/config/reggie.config" @@ -1082,6 +1086,10 @@ <copy file="${server.log4j.from.file}" todir="${logging.to.path}" /> + <property name="haserver.log4j.from.file" location="${src.resources}/HAJournal/log4jHA.properties" /> + <copy file="${haserver.log4j.from.file}" + todir="${logging.to.path}" /> + <!-- Stage service-specific logging config file --> <property name="bigdata-jini.root" location="${bigdata-jini.dir}/src/java/com/bigdata" /> @@ -1108,6 +1116,12 @@ <copy file="${src.resources.config}/bigdataCluster16.config" todir="${dist.var.config.jini}" /> + <!-- Stage the HAJournal service config file --> + <copy file="${src.resources}/HAJournal/HAJournal.config" + todir="${dist.var.config.jini}" /> + <copy file="${src.resources}/HAJournal/startHAServices.config" + todir="${dist.var.config.jini}" /> + <!-- Stage the infrastructure service config files --> <copy file="${src.resources.config}/jini/reggie.config" @@ -1117,7 +1131,7 @@ <copy file="${src.resources.config}/jini/startAll.config" todir="${dist.var.config.jini}" /> - <!-- Stage top-level license file and copyright NOTICE file. --> + <!-- Stage top-level license file and copyright NOTICE file. 
--> <copy toDir="${dist.doc}"> <fileset file="${bigdata.dir}/LICENSE.txt"/> <fileset file="${bigdata.dir}/NOTICE"/> @@ -1183,6 +1197,7 @@ <include name="bigdata/**" /> <exclude name="bigdata/bin/disco-tool" /> <exclude name="bigdata/bin/pstart" /> + <exclude name="bigdata/bin/startHAServices" /> </tarfileset> <!-- Add scripts separately, making them executable --> @@ -1190,6 +1205,7 @@ <tarfileset dir="${bigdata.dir}/dist" filemode="755"> <include name="bigdata/bin/disco-tool" /> <include name="bigdata/bin/pstart" /> + <include name="bigdata/bin/startHAServices" /> </tarfileset> </tar> @@ -1325,6 +1341,7 @@ <exclude name="dist/bigdata/bin/disco-tool" /> <exclude name="dist/bigdata/bin/pstart" /> + <exclude name="dist/bigdata/bin/startHAServices" /> </tarfileset> <!-- Add dist files separately, minus scripts --> @@ -1333,6 +1350,7 @@ <include name="dist/bigdata/**" /> <exclude name="dist/bigdata/bin/disco-tool" /> <exclude name="dist/bigdata/bin/pstart" /> + <exclude name="dist/bigdata/bin/startHAServices" /> </tarfileset> <!-- Add dist scripts separately, making them executable --> @@ -1340,6 +1358,7 @@ <tarfileset dir="${bigdata.dir}" prefix="${version}" filemode="755"> <include name="dist/bigdata/bin/disco-tool" /> <include name="dist/bigdata/bin/pstart" /> + <include name="dist/bigdata/bin/startHAServices" /> </tarfileset> </tar> Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2013-11-22 01:52:38 UTC (rev 7578) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2013-11-22 02:06:54 UTC (rev 7579) @@ -63,23 +63,27 @@ private static fedname = System.getProperty("FEDNAME","benchmark"); // NanoSparqlServer (http) port. - private static nssPort = 8090; + private static nssPort = Integer.parseInt(System.getProperty("NSS_PORT","8090")); // write replication pipeline port (listener). - private static haPort = 9090; + private static haPort = Integer.parseInt(System.getProperty("HA_PORT","9090")); // The #of services in the write pipeline. - private static replicationFactor = 3; + private static replicationFactor = Integer.parseInt(System.getProperty("REPLICATION_FACTOR","3")); // The logical service identifier shared by all members of the quorum. - private static logicalServiceId = "HAJournal-1"; + private static logicalServiceId = System.getProperty("LOGICAL_SERVICE_ID","HAJournal-1"); // The ServiceID for *this* service -or- null to assign it dynamically. private static serviceId = null; + // The base directory for the federation. + private static fedDir = new File(System.getProperty("FED_DIR","."),fedname); + // The service directory (if serviceId is null, then you must override). // private static serviceDir = new File(fedname,""+serviceId); - private static serviceDir = new File(fedname,logicalServiceId+File.separator+"HAJournalServer"); + //private static serviceDir = new File(fedname,logicalServiceId+File.separator+"HAJournalServer"); + private static serviceDir = new File(fedDir,logicalServiceId+File.separator+"HAJournalServer"); // journal data directory. private static dataDir = serviceDir; @@ -235,7 +239,7 @@ * the CLIENT port for the zookeeper server instance. */ // ensemble - servers = "bigdata15:2081,bigdata16:2081,bigdata17:2081"; + servers = System.getProperty("ZK_SERVERS","bigdata15:2081,bigdata16:2081,bigdata17:2081"); /* Session timeout (optional). 
*/ sessionTimeout = bigdata.sessionTimeout; @@ -349,16 +353,20 @@ */ // performance counters for internal queues. - //new NV(Journal.Options.COLLECT_QUEUE_STATISTICS,"true"), // off by default. + new NV(Journal.Options.COLLECT_QUEUE_STATISTICS, + System.getProperty("COLLECT_QUEUE_STATISTICS","false")), // platform and process performance counters (requires external s/w on some platforms) - //new NV(Journal.Options.COLLECT_PLATFORM_STATISTICS,"true"), // off by default. - + new NV(Journal.Options.COLLECT_PLATFORM_STATISTICS, + System.getProperty("COLLECT_PLATFORM_STATISTICS","false")), + // uses bigdata-ganglia module to report service metrics to ganglia. - //new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), off by default. + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT, + System.getProperty("GANGLIA_REPORT","false")), // uses bigdata-ganglia module to build internal model of cluster load. - //new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), // off by default. + new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN, + System.getProperty("GANGLIA_LISTENER","false")), }, bigdata.kb); @@ -373,7 +381,7 @@ create = true; - queryThreadPoolSize = 16; + queryThreadPoolSize = Integer.parseInt(System.getProperty("QUERY_THREAD_POOL_SIZE","16")); describeEachNamedGraph = true; Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.env =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.env 2013-11-22 01:52:38 UTC (rev 7578) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.env 2013-11-22 02:06:54 UTC (rev 7579) @@ -39,7 +39,7 @@ # The log4j configuration file. Each service will log locally unless # you provide otherwise in your logging configuration. -LOG4J_CONFIG=file:src/resources/HAJournal/log4j.properties +LOG4J_CONFIG=file:src/resources/HAJournal/log4jHA.properties # The java logging configuration file. Each service will log locally unless # you provide otherwise in your logging configuration. Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README 2013-11-22 01:52:38 UTC (rev 7578) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README 2013-11-22 02:06:54 UTC (rev 7579) @@ -2,7 +2,9 @@ Journal. Note: The bigdata scripts bundled in this directory are designed to be run -from the root directory of the SVN checkout of the bigdata code base. +from the root directory of the SVN checkout of the bigdata code base. This +is used for developers. The installation is done using the top-level ant +build file and the "ant deploy-artifact" target. The basic procedure is: @@ -68,8 +70,8 @@ commit point when the quorum is fully met. These HALog files can get large if you are doing a long running update. -log4j.properties - A default log4j configuration file for use by the bigdata - services. +log4jHA.properties - A default log4j configuration file for use by the bigdata + services. logging.properties - A default Java logging configuration. This may be used to control the log levels for jini/river components inside @@ -78,4 +80,3 @@ policy.all - A default java permissions file. This file grants ALL permissions. You may specify a more rigorous security policy. 
- Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4j.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4j.properties 2013-11-22 01:52:38 UTC (rev 7578) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4j.properties 2013-11-22 02:06:54 UTC (rev 7579) @@ -1,80 +0,0 @@ -## -# This is the default log4j configuration for distribution and CI tests. -## - -# Note: logging at INFO or DEBUG will significantly impact throughput! -log4j.rootCategory=WARN, dest2 - -log4j.logger.com.bigdata=WARN -log4j.logger.com.bigdata.btree=WARN -log4j.logger.com.bigdata.counters.History=ERROR -log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR -log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO -log4j.logger.com.bigdata.journal.CompactTask=INFO -log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR -log4j.logger.com.bigdata.rdf.load=INFO -log4j.logger.com.bigdata.rdf.store.DataLoader=INFO -log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO - -log4j.logger.com.bigdata.rdf.sail.webapp.NanoSparqlServer=ALL - -# HA related loggers (debugging only) -#log4j.logger.com.bigdata.ha=INFO -#log4j.logger.com.bigdata.txLog=INFO -#log4j.logger.com.bigdata.haLog=INFO -##log4j.logger.com.bigdata.rwstore=ALL -#log4j.logger.com.bigdata.journal=INFO -##log4j.logger.com.bigdata.journal.AbstractBufferStrategy=ALL -#log4j.logger.com.bigdata.journal.jini.ha=INFO -##log4j.logger.com.bigdata.service.jini.lookup=ALL -#log4j.logger.com.bigdata.quorum=INFO -#log4j.logger.com.bigdata.quorum.zk=INFO -##log4j.logger.com.bigdata.quorum.quorumState=ALL,destPlain -##log4j.logger.com.bigdata.io.writecache=ALL - -# dest2 includes the thread name and elapsed milliseconds. -# Note: %r is elapsed milliseconds. -# Note: %t is the thread name. -# See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html -#log4j.appender.dest2=org.apache.log4j.ConsoleAppender -log4j.appender.dest2=org.apache.log4j.RollingFileAppender -log4j.appender.dest2.File=HAJournalServer.log -log4j.appender.dest2.MaxFileSize=500MB -log4j.appender.dest2.MaxBackupIndex=20 -log4j.appender.dest2.layout=org.apache.log4j.PatternLayout -log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %d{ISO8601} %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n - -## destPlain -#log4j.appender.destPlain=org.apache.log4j.ConsoleAppender -#log4j.appender.destPlain.layout=org.apache.log4j.PatternLayout -#log4j.appender.destPlain.layout.ConversionPattern= - -## -# Summary query evaluation log (tab delimited file). -#log4j.logger.com.bigdata.bop.engine.QueryLog=INFO,queryLog -log4j.additivity.com.bigdata.bop.engine.QueryLog=false -log4j.appender.queryLog=org.apache.log4j.FileAppender -log4j.appender.queryLog.Threshold=ALL -log4j.appender.queryLog.File=queryLog.csv -log4j.appender.queryLog.Append=true -# I find that it is nicer to have this unbuffered since you can see what -# is going on and to make sure that I have complete rule evaluation logs -# on shutdown. -log4j.appender.queryLog.BufferedIO=false -log4j.appender.queryLog.layout=org.apache.log4j.PatternLayout -log4j.appender.queryLog.layout.ConversionPattern=%m - -## -# BOp run state trace (tab delimited file). Uncomment the next line to enable. 
-#log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog -log4j.additivity.com.bigdata.bop.engine.RunState$TableLog=false -log4j.appender.queryRunStateLog=org.apache.log4j.FileAppender -log4j.appender.queryRunStateLog.Threshold=ALL -log4j.appender.queryRunStateLog.File=queryRunState.log -log4j.appender.queryRunStateLog.Append=true -# I find that it is nicer to have this unbuffered since you can see what -# is going on and to make sure that I have complete rule evaluation logs -# on shutdown. -log4j.appender.queryRunStateLog.BufferedIO=false -log4j.appender.queryRunStateLog.layout=org.apache.log4j.PatternLayout -log4j.appender.queryRunStateLog.layout.ConversionPattern=%m Copied: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4jHA.properties (from rev 7501, branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4j.properties) =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4jHA.properties (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4jHA.properties 2013-11-22 02:06:54 UTC (rev 7579) @@ -0,0 +1,80 @@ +## +# This is the default log4j configuration for distribution and CI tests. +## + +# Note: logging at INFO or DEBUG will significantly impact throughput! +log4j.rootCategory=WARN, dest2 + +log4j.logger.com.bigdata=WARN +log4j.logger.com.bigdata.btree=WARN +log4j.logger.com.bigdata.counters.History=ERROR +log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR +log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO +log4j.logger.com.bigdata.journal.CompactTask=INFO +log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR +log4j.logger.com.bigdata.rdf.load=INFO +log4j.logger.com.bigdata.rdf.store.DataLoader=INFO +log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO + +#log4j.logger.com.bigdata.rdf.sail.webapp.NanoSparqlServer=INFO + +# HA related loggers (debugging only) +#log4j.logger.com.bigdata.ha=INFO +#log4j.logger.com.bigdata.txLog=INFO +#log4j.logger.com.bigdata.haLog=INFO +##log4j.logger.com.bigdata.rwstore=ALL +#log4j.logger.com.bigdata.journal=INFO +##log4j.logger.com.bigdata.journal.AbstractBufferStrategy=ALL +#log4j.logger.com.bigdata.journal.jini.ha=INFO +##log4j.logger.com.bigdata.service.jini.lookup=ALL +#log4j.logger.com.bigdata.quorum=INFO +#log4j.logger.com.bigdata.quorum.zk=INFO +##log4j.logger.com.bigdata.quorum.quorumState=ALL,destPlain +##log4j.logger.com.bigdata.io.writecache=ALL + +# dest2 includes the thread name and elapsed milliseconds. +# Note: %r is elapsed milliseconds. +# Note: %t is the thread name. +# See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html +#log4j.appender.dest2=org.apache.log4j.ConsoleAppender +log4j.appender.dest2=org.apache.log4j.RollingFileAppender +log4j.appender.dest2.File=HAJournalServer.log +log4j.appender.dest2.MaxFileSize=500MB +log4j.appender.dest2.MaxBackupIndex=20 +log4j.appender.dest2.layout=org.apache.log4j.PatternLayout +log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %d{ISO8601} %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n + +## destPlain +#log4j.appender.destPlain=org.apache.log4j.ConsoleAppender +#log4j.appender.destPlain.layout=org.apache.log4j.PatternLayout +#log4j.appender.destPlain.layout.ConversionPattern= + +## +# Summary query evaluation log (tab delimited file). 
+#log4j.logger.com.bigdata.bop.engine.QueryLog=INFO,queryLog +log4j.additivity.com.bigdata.bop.engine.QueryLog=false +log4j.appender.queryLog=org.apache.log4j.FileAppender +log4j.appender.queryLog.Threshold=ALL +log4j.appender.queryLog.File=queryLog.csv +log4j.appender.queryLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. +log4j.appender.queryLog.BufferedIO=false +log4j.appender.queryLog.layout=org.apache.log4j.PatternLayout +log4j.appender.queryLog.layout.ConversionPattern=%m + +## +# BOp run state trace (tab delimited file). Uncomment the next line to enable. +#log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog +log4j.additivity.com.bigdata.bop.engine.RunState$TableLog=false +log4j.appender.queryRunStateLog=org.apache.log4j.FileAppender +log4j.appender.queryRunStateLog.Threshold=ALL +log4j.appender.queryRunStateLog.File=queryRunState.log +log4j.appender.queryRunStateLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. +log4j.appender.queryRunStateLog.BufferedIO=false +log4j.appender.queryRunStateLog.layout=org.apache.log4j.PatternLayout +log4j.appender.queryRunStateLog.layout.ConversionPattern=%m Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/startHAServices.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/startHAServices.config (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/startHAServices.config 2013-11-22 02:06:54 UTC (rev 7579) @@ -0,0 +1,59 @@ +import java.io.File; +import com.sun.jini.start.NonActivatableServiceDescriptor; +import com.sun.jini.start.ServiceDescriptor; +import com.sun.jini.config.ConfigUtil; + +/* + * Apache River ServiceStarter configuration. + * + * This configuration file is used to start the services required for + * a bigdata Highly Available Replication Cluster (HAJournalServer) on + * each node. + + ClassServer: Provides access to downloadable jars in LIBDL_DIR. + reggie: Provides implementations of ServiceRegistrar. +HAJournalServer: Bigdata HA server instance. + +*/ +com.sun.jini.start { + private static policyFile = System.getProperty("POLICY_FILE"); + private static host = ConfigUtil.getHostName(); + private static port = System.getProperty("JINI_CLASS_SERVER_PORT"); + private static jskdl = " http://" + host + ":" + port + "/jsk-dl.jar"; + private static libDir = System.getProperty("LIB_DIR"); + private static libDLDir = System.getProperty("LIBDL_DIR"); + private static configDir = System.getProperty("CONFIG_DIR")+File.separator+"jini"+File.separator; + + serviceDescriptors = new ServiceDescriptor[]{ + + // ClassServer + new NonActivatableServiceDescriptor( + "", + policyFile, + libDir+"classserver.jar", + "com.sun.jini.tool.ClassServer", + new String[]{ + "-port", port, + "-dir", libDLDir, + "-verbose" + }), + + // Service Registrar (aka LookUp Service aka LUS) + new NonActivatableServiceDescriptor( + "http://" + host + ":" + port + "/reggie-dl.jar" + jskdl, + policyFile, + libDir+"reggie.jar", + "com.sun.jini.reggie.TransientRegistrarImpl", + new String[] { configDir+"reggie.config" }), + + // HAJournalServer + new NonActivatableServiceDescriptor( + "", // TODO code base URL? 
+ policyFile, + System.getProperty("HAJOURNAL_CLASSPATH"), + "com.bigdata.journal.jini.ha.HAJournalServer", + new String[] { configDir+"HAJournal.config" }) + + }; + +} |
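For illustration, a minimal launcher sketch for the startHAServices.config above. It supplies the system properties that the configuration resolves (POLICY_FILE, JINI_CLASS_SERVER_PORT, LIB_DIR, LIBDL_DIR, CONFIG_DIR, HAJOURNAL_CLASSPATH) and then hands the file to the stock Apache River ServiceStarter. Every path and the port below are placeholder assumptions, not values shipped with the distribution, which presumably sets the same properties from its own startup script.

// Sketch only: all property values below are assumed for illustration.
public class StartHAServicesSketch {

    public static void main(final String[] args) {

        System.setProperty("POLICY_FILE", "/opt/bigdata/policy.all");
        System.setProperty("JINI_CLASS_SERVER_PORT", "9090");
        System.setProperty("LIB_DIR", "/opt/bigdata/lib");
        System.setProperty("LIBDL_DIR", "/opt/bigdata/lib-dl");
        System.setProperty("CONFIG_DIR", "/opt/bigdata/var/config");
        System.setProperty("HAJOURNAL_CLASSPATH", "/opt/bigdata/lib/bigdata.jar");

        // The River ServiceStarter reads the serviceDescriptors entry from the
        // configuration and launches the ClassServer, reggie and the
        // HAJournalServer in turn.
        com.sun.jini.start.ServiceStarter.main(
                new String[] { "/opt/bigdata/startHAServices.config" });
    }
}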
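The log4jHA.properties copied above also references MDC keys in the dest2 pattern (%X{hostname}, %X{serviceUUID}, %X{taskname}, and so on). The HAJournalServer populates those keys itself; the fragment below is only a generic log4j 1.2 illustration, with made-up values, of how %X{...} conversion characters pick values out of the mapped diagnostic context. It is not bigdata's actual wiring.

import org.apache.log4j.Logger;
import org.apache.log4j.MDC;

public class MDCPatternSketch {

    public static void main(final String[] args) {

        MDC.put("hostname", "ha-node-1.example.com"); // assumed value
        MDC.put("serviceUUID", "00000000-demo-uuid"); // assumed value

        // With the dest2 ConversionPattern above, both values are rendered
        // between the ISO8601 timestamp and the thread name.
        Logger.getLogger(MDCPatternSketch.class).warn("MDC pattern demo");
    }
}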
From: <tho...@us...> - 2013-11-22 01:53:02
|
Revision: 7578 http://bigdata.svn.sourceforge.net/bigdata/?rev=7578&view=rev Author: thompsonbry Date: 2013-11-22 01:52:38 +0000 (Fri, 22 Nov 2013) Log Message: ----------- javadoc and made the logger private. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/linux/SysstatUtil.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/linux/SysstatUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/linux/SysstatUtil.java 2013-11-21 16:48:15 UTC (rev 7577) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/linux/SysstatUtil.java 2013-11-22 01:52:38 UTC (rev 7578) @@ -36,8 +36,6 @@ import org.apache.log4j.Logger; -import com.bigdata.counters.AbstractStatisticsCollector; - /** * Some utility methods related to integration with <code>sysstat</code>. * @@ -46,14 +44,19 @@ */ public class SysstatUtil { - final protected static Logger log = Logger - .getLogger(AbstractStatisticsCollector.class); + final private static Logger log = Logger.getLogger(SysstatUtil.class); public interface Options { + /** + * The name of the optional property whose value specifies the default + * location of the SYSSTAT package (pidstat, iostat, etc) (default + * {@value #DEFAULT_PATH}). + * + * @see #DEFAULT_PATH + */ + String PATH = "com.bigdata.counters.linux.sysstat.path"; - String PATH = "com.bigdata.counters.linux.sysstat.path"; - - String DEFAULT_PATH = "/usr/bin"; + String DEFAULT_PATH = "/usr/bin"; } /** @@ -73,8 +76,8 @@ static public final File getPath(final String cmd) { File f, path; - final File configuredPath = path = new File(System.getProperty(Options.PATH, - Options.DEFAULT_PATH)); + final File configuredPath = path = new File(System.getProperty( + Options.PATH, Options.DEFAULT_PATH)); if (log.isInfoEnabled()) log.info(Options.PATH + "=" + configuredPath); |
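As a usage note for the option documented in this change, a minimal sketch follows. The property name and the getPath(String) signature are taken from the diff above; the override location /usr/local/bin is an arbitrary example.

import java.io.File;

import com.bigdata.counters.linux.SysstatUtil;

public class SysstatPathSketch {

    public static void main(final String[] args) {

        // Without this property the sysstat utilities are resolved under the
        // default location, /usr/bin.
        System.setProperty("com.bigdata.counters.linux.sysstat.path",
                "/usr/local/bin");

        final File iostat = SysstatUtil.getPath("iostat");

        System.out.println("resolved iostat: " + iostat);
    }
}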
From: <jer...@us...> - 2013-11-21 16:48:26
|
Revision: 7577 http://bigdata.svn.sourceforge.net/bigdata/?rev=7577&view=rev Author: jeremy_carroll Date: 2013-11-21 16:48:15 +0000 (Thu, 21 Nov 2013) Log Message: ----------- Reverted static flag to control Join mode during eval testing. Refactored AbstractDataDrivenSPARQLTestCase to allow for subclasses that have test data inline as well as in separate files. Migrated TestUnionMinus to inline test data. Added additional MINUS test cases Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataDrivenSPARQLTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTCK.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestUnionMinus.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestServiceRegistry.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataAndSPARQLTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractInlineSELECTTestCase.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java 2013-11-20 22:41:35 UTC (rev 7576) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -11,7 +11,6 @@ import com.bigdata.rdf.sparql.ast.PathNode.PathMod; import com.bigdata.rdf.sparql.ast.eval.AST2BOpBase; import com.bigdata.rdf.sparql.ast.optimizers.StaticOptimizer; -import com.bigdata.rdf.store.ITripleStore; /** * A special kind of AST node that represents the SPARQL 1.1 arbitrary length Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2013-11-20 22:41:35 UTC (rev 7576) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -53,11 +53,6 @@ * FIXME Rolling back r7319 which broke UNION processing. */ public class AST2BOpContext implements IdFactory, IEvaluationContext { - - /** - * This field is public non-final so that we can change its value during testing. 
- */ - public static boolean DEFAULT_NATIVE_HASH_JOINS = QueryHints.DEFAULT_NATIVE_HASH_JOINS; /** * The {@link ASTContainer} @@ -153,7 +148,7 @@ * * @see QueryHints#NATIVE_HASH_JOINS */ - public boolean nativeHashJoins = DEFAULT_NATIVE_HASH_JOINS; + public boolean nativeHashJoins = QueryHints.DEFAULT_NATIVE_HASH_JOINS; /** * When <code>true</code>, a merge-join pattern will be recognized if it Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataAndSPARQLTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataAndSPARQLTestCase.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataAndSPARQLTestCase.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -0,0 +1,224 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created Nov 2013 + */ + +/* +Portions of this code are: + +Copyright Aduna (http://www.aduna-software.com/) � 2001-2007 + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ +package com.bigdata.rdf.sparql.ast.eval; + +import java.io.IOException; +import java.io.InputStream; + +import org.openrdf.model.Resource; +import org.openrdf.model.Statement; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.model.impl.URIImpl; +import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.TupleQueryResult; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFHandlerException; +import org.openrdf.rio.RDFParser; +import org.openrdf.rio.RDFParserRegistry; +import org.openrdf.rio.helpers.RDFHandlerBase; + +import com.bigdata.bop.engine.AbstractQueryEngineTestCase; +import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.rio.StatementBuffer; +import com.bigdata.rdf.sparql.ast.ASTContainer; +import com.bigdata.rdf.sparql.ast.AbstractASTEvaluationTestCase; +import com.bigdata.rdf.store.AbstractTripleStore; + +public abstract class AbstractDataAndSPARQLTestCase extends AbstractASTEvaluationTestCase { + + public class AbsHelper { + + protected final String queryStr; + /** + * This is the astContainer of the last query executed. + */ + protected ASTContainer astContainer; + + public AbsHelper(String queryStr) { + this.queryStr = queryStr; + } + + protected AbstractTripleStore getTripleStore() { + + return store; + + } + + protected void compareTupleQueryResults(final TupleQueryResult queryResult, final TupleQueryResult expectedResult, final boolean checkOrder) + throws QueryEvaluationException { + AbstractQueryEngineTestCase.compareTupleQueryResults(getName(), + "", store, astContainer, queryResult, expectedResult, + false, checkOrder); + } + + + long loadData(final InputStream is, RDFFormat format, String uri) { + final RDFParser rdfParser = RDFParserRegistry.getInstance().get(format).getParser(); + + rdfParser.setValueFactory(store.getValueFactory()); + + rdfParser.setVerifyData(true); + + rdfParser.setStopAtFirstError(true); + + rdfParser.setDatatypeHandling(RDFParser.DatatypeHandling.IGNORE); + + final AddStatementHandler handler = new AddStatementHandler(); + + handler.setContext(new URIImpl(uri)); + + rdfParser.setRDFHandler(handler); + + /* + * Run the parser, which will cause statements to be inserted. + */ + + + try { + + rdfParser.parse(is, baseURI); + + return handler.close(); + + } catch (Exception e) { + + throw new RuntimeException(e); + + } finally { + + try { + + is.close(); + + } catch (IOException e) { + + throw new RuntimeException(e); + + } + + } + } + + /** + * Helper class adds statements to the sail as they are visited by a + * parser. + */ + private class AddStatementHandler extends RDFHandlerBase { + + private final StatementBuffer<Statement> buffer; + + private Resource context = null; + + private long n = 0L; + + public AddStatementHandler() { + + buffer = new StatementBuffer<Statement>(store, 100/* capacity */); + + } + + public void setContext(final Resource context) { + + this.context = context; + + } + + public void handleStatement(final Statement stmt) + throws RDFHandlerException { + + final Resource s = stmt.getSubject(); + final URI p = stmt.getPredicate(); + final Value o = stmt.getObject(); + final Resource c = stmt.getContext() == null ? this.context + : stmt.getContext(); + +// if (log.isDebugEnabled()) +// log.debug("<" + s + "," + p + "," + o + "," + c + ">"); + + buffer.add(s, p, o, c, StatementEnum.Explicit); + + n++; + + } + + /** + * + * @return The #of statements visited by the parser. 
+ */ + public long close() { + + buffer.flush(); + + return n; + + } + + } + + } + + public AbstractDataAndSPARQLTestCase() { + } + + public AbstractDataAndSPARQLTestCase(String name) { + super(name); + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataDrivenSPARQLTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataDrivenSPARQLTestCase.java 2013-11-20 22:41:35 UTC (rev 7576) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataDrivenSPARQLTestCase.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -106,6 +106,7 @@ import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.AbstractASTEvaluationTestCase; import com.bigdata.rdf.sparql.ast.QueryRoot; +import com.bigdata.rdf.sparql.ast.eval.AbstractDataAndSPARQLTestCase.AbsHelper; import com.bigdata.rdf.store.AbstractTripleStore; /** @@ -120,7 +121,7 @@ * TODO Support manifest driven test suite. */ public class AbstractDataDrivenSPARQLTestCase extends - AbstractASTEvaluationTestCase { + AbstractDataAndSPARQLTestCase { private static final Logger log = Logger .getLogger(AbstractDataDrivenSPARQLTestCase.class); @@ -144,21 +145,14 @@ * Note: This class was derived from the openrdf SPARQLQueryTest file (Aduna * BSD style license). */ - public class TestHelper { + public class TestHelper extends AbsHelper { - private final String testURI; - private final String queryFileURL; - private final String[] dataFileURLs; private final String resultFileURL; - private final boolean laxCardinality; private final boolean checkOrder; - private final String queryStr; - - private final ASTContainer astContainer; -// private final AST2BOpContext context; + // private final PipelineOp queryPlan; @@ -174,6 +168,7 @@ } + /** * * @param testURI @@ -199,7 +194,7 @@ throws Exception { this(testURI, queryFileURL, dataFileURL, resultFileURL, - false/* laxCardinality */, false/* checkOrder */); + false/* checkOrder */); } @@ -208,17 +203,17 @@ throws Exception { this(testURI, queryFileURL, dataFileURLs, resultFileURL, - false/* laxCardinality */, false/* checkOrder */); + false/* checkOrder */); } public TestHelper(final String testURI, final String queryFileURL, final String dataFileURL, final String resultFileURL, - final boolean laxCardinality, final boolean checkOrder) + final boolean checkOrder) throws Exception { this(testURI, queryFileURL, new String[] { dataFileURL }, - resultFileURL, laxCardinality, checkOrder); + resultFileURL, checkOrder); } @@ -229,26 +224,21 @@ * @param queryFileURL * @param dataFileURLs * @param resultFileURL - * @param laxCardinality * @param checkOrder * @throws Exception */ public TestHelper(final String testURI, final String queryFileURL, final String[] dataFileURLs, final String resultFileURL, - final boolean laxCardinality, final boolean checkOrder) + final boolean checkOrder) throws Exception { + super(getResourceAsString(queryFileURL)); if (log.isInfoEnabled()) log.info("\ntestURI:\n" + testURI); - this.testURI = testURI; - this.queryFileURL = queryFileURL; - this.dataFileURLs = dataFileURLs; this.resultFileURL = resultFileURL; - this.laxCardinality = laxCardinality; this.checkOrder = checkOrder; - this.queryStr = getResourceAsString(queryFileURL); if (log.isInfoEnabled()) log.info("\nquery:\n" + queryStr); @@ -514,9 +504,7 @@ final TupleQueryResult expectedResult) throws 
QueryEvaluationException { - AbstractQueryEngineTestCase.compareTupleQueryResults(getName(), - testURI, store, astContainer, queryResult, expectedResult, - laxCardinality, checkOrder); + compareTupleQueryResults(queryResult, expectedResult, checkOrder); } @@ -527,116 +515,53 @@ expectedResult); } - protected InputStream getResourceAsStream(final String resource) { - - // try the classpath - InputStream is = getClass().getResourceAsStream(resource); - - if (is == null) { - - // Searching for the resource from the root of the class - // returned - // by getClass() (relative to the class' package) failed. - // Next try searching for the desired resource from the root - // of the jar; that is, search the jar file for an exact match - // of the input string. - is = getClass().getClassLoader().getResourceAsStream(resource); - - } - - if (is == null) { - - final File file = new File(resource); - - if (file.exists()) { - - try { - - is = new FileInputStream(resource); - - } catch (FileNotFoundException e) { - - throw new RuntimeException(e); - - } - - } - - } - - if (is == null) { - - try { - - is = new URL(resource).openStream(); - - } catch (MalformedURLException e) { - - /* - * Ignore. we will handle the problem below if this was not - * a URL. - */ - - } catch (IOException e) { - - throw new RuntimeException(e); - - } - - } - - if (is == null) - throw new RuntimeException("Not found: " + resource); - - return is; - - } - /** - * Return the contents of the resource. + * Load some RDF data. * * @param resource - * The resource. + * The resource whose data will be loaded. * - * @return It's contents. + * @return The #of statements parsed from the source. If there are + * duplicate told statements, then there may be fewer statements + * written onto the KB. */ - protected String getResourceAsString(final String resource) { + protected long loadData(final String resource) { - final StringBuilder sb = new StringBuilder(); + return loadData(getResourceAsStream(resource), RDFFormat.forFileName(resource), new File(resource).toURI().toString()); + + } + + } + - final InputStream is = getResourceAsStream(resource); + private static InputStream getResourceAsStream(final String resource) { - if (is == null) - throw new RuntimeException("Not found: " + resource); + // try the classpath + InputStream is = AbstractDataDrivenSPARQLTestCase.class.getResourceAsStream(resource); - try { + if (is == null) { - final LineNumberReader r = new LineNumberReader( - new InputStreamReader(is)); + // Searching for the resource from the root of the class + // returned + // by getClass() (relative to the class' package) failed. + // Next try searching for the desired resource from the root + // of the jar; that is, search the jar file for an exact match + // of the input string. + is = AbstractDataDrivenSPARQLTestCase.class.getClassLoader().getResourceAsStream(resource); - String s; - while ((s = r.readLine()) != null) { + } - sb.append(s); + if (is == null) { - sb.append("\n"); + final File file = new File(resource); - } + if (file.exists()) { - return sb.toString(); - - } catch (IOException e) { - - throw new RuntimeException(e); - - } finally { - try { - if (is != null) - is.close(); + is = new FileInputStream(resource); - } catch (IOException e) { + } catch (FileNotFoundException e) { throw new RuntimeException(e); @@ -646,122 +571,81 @@ } - /** - * Load some RDF data. - * - * @param resource - * The resource whose data will be loaded. - * - * @return The #of statements parsed from the source. 
If there are - * duplicate told statements, then there may be fewer statements - * written onto the KB. - */ - protected long loadData(final String resource) { + if (is == null) { - final RDFFormat rdfFormat = RDFFormat.forFileName(resource); - - final RDFParserFactory rdfParserFactory = RDFParserRegistry - .getInstance().get(rdfFormat); - - final RDFParser rdfParser = rdfParserFactory.getParser(); - - rdfParser.setValueFactory(store.getValueFactory()); - - rdfParser.setVerifyData(true); - - rdfParser.setStopAtFirstError(true); - - rdfParser.setDatatypeHandling(RDFParser.DatatypeHandling.IGNORE); - - final AddStatementHandler handler = new AddStatementHandler(); - - handler.setContext(new URIImpl(new File(resource).toURI().toString())); - - rdfParser.setRDFHandler(handler); - - /* - * Run the parser, which will cause statements to be inserted. - */ - - final InputStream is = getResourceAsStream(resource); - try { - rdfParser.parse(is, baseURI); + is = new URL(resource).openStream(); - return handler.close(); + } catch (MalformedURLException e) { - } catch (Exception e) { + /* + * Ignore. we will handle the problem below if this was not + * a URL. + */ + } catch (IOException e) { + throw new RuntimeException(e); - } finally { + } - try { + } - is.close(); + if (is == null) + throw new RuntimeException("Not found: " + resource); - } catch (IOException e) { + return is; - throw new RuntimeException(e); + } - } + /** + * Return the contents of the resource. + * + * @param resource + * The resource. + * + * @return It's contents. + */ + private static String getResourceAsString(final String resource) { - } + final StringBuilder sb = new StringBuilder(); - } + final InputStream is = getResourceAsStream(resource); - /** - * Helper class adds statements to the sail as they are visited by a - * parser. - */ - private class AddStatementHandler extends RDFHandlerBase { + if (is == null) + throw new RuntimeException("Not found: " + resource); - private final StatementBuffer<Statement> buffer; + try { - private Resource context = null; - - private long n = 0L; + final LineNumberReader r = new LineNumberReader( + new InputStreamReader(is)); - public AddStatementHandler() { + String s; + while ((s = r.readLine()) != null) { - buffer = new StatementBuffer<Statement>(store, 100/* capacity */); + sb.append(s); - } + sb.append("\n"); - public void setContext(final Resource context) { - - this.context = context; - } - - public void handleStatement(final Statement stmt) - throws RDFHandlerException { - final Resource s = stmt.getSubject(); - final URI p = stmt.getPredicate(); - final Value o = stmt.getObject(); - final Resource c = stmt.getContext() == null ? this.context - : stmt.getContext(); + return sb.toString(); - if (log.isDebugEnabled()) - log.debug("<" + s + "," + p + "," + o + "," + c + ">"); + } catch (IOException e) { - buffer.add(s, p, o, c, StatementEnum.Explicit); + throw new RuntimeException(e); - n++; + } finally { - } + try { - /** - * - * @return The #of statements visited by the parser. 
- */ - public long close() { + if (is != null) + is.close(); - buffer.flush(); + } catch (IOException e) { - return n; + throw new RuntimeException(e); } @@ -769,4 +653,6 @@ } + + } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractInlineSELECTTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractInlineSELECTTestCase.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractInlineSELECTTestCase.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -0,0 +1,266 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* +Portions of this code are: + +Copyright Aduna (http://www.aduna-software.com/) � 2001-2007 + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ +/* + * Created Nov 2013 + */ + +package com.bigdata.rdf.sparql.ast.eval; + +import info.aduna.iteration.Iterations; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.LineNumberReader; +import java.io.UnsupportedEncodingException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.log4j.Logger; +import org.openrdf.model.Resource; +import org.openrdf.model.Statement; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.model.impl.URIImpl; +import org.openrdf.query.GraphQueryResult; +import org.openrdf.query.MalformedQueryException; +import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.TupleQueryResult; +import org.openrdf.query.algebra.evaluation.QueryBindingSet; +import org.openrdf.query.dawg.DAWGTestResultSetUtil; +import org.openrdf.query.impl.TupleQueryResultBuilder; +import org.openrdf.query.resultio.BooleanQueryResultFormat; +import org.openrdf.query.resultio.BooleanQueryResultParserRegistry; +import org.openrdf.query.resultio.QueryResultIO; +import org.openrdf.query.resultio.TupleQueryResultFormat; +import org.openrdf.query.resultio.TupleQueryResultParser; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFHandlerException; +import org.openrdf.rio.RDFParser; +import org.openrdf.rio.RDFParser.DatatypeHandling; +import org.openrdf.rio.RDFParserFactory; +import org.openrdf.rio.RDFParserRegistry; +import org.openrdf.rio.Rio; +import org.openrdf.rio.helpers.RDFHandlerBase; +import org.openrdf.rio.helpers.StatementCollector; + +import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.rio.StatementBuffer; +import com.bigdata.rdf.sail.sparql.Bigdata2ASTSPARQLParser; +import com.bigdata.rdf.sparql.ast.AbstractASTEvaluationTestCase; +import com.bigdata.rdf.sparql.ast.QueryRoot; + +/** + * The idea here is that the subclasses provide the data for the test + * (i.e. the triples, the query and the results) inline and not in separate + * files. One goal is to reduce the level of redundant comments + * that can be out of sync. + * + * See {@link AbstractDataDrivenSPARQLTestCase} for the out-of-line files based approach, + * which may be more suitable for larger queries, test data or results. 
+ * @author jeremycarroll + */ +public abstract class AbstractInlineSELECTTestCase extends AbstractDataAndSPARQLTestCase + { + + private static final Logger log = Logger + .getLogger(AbstractInlineSELECTTestCase.class); + + private final Map<String,String> prefixes = new HashMap<String,String>(); + /** + * + */ + public AbstractInlineSELECTTestCase() { + } + + /** + * @param name + */ + public AbstractInlineSELECTTestCase(String name) { + super(name); + addPrefix("","http://example.org/banana#"); + addPrefix("rdf","http://www.w3.org/1999/02/22-rdf-syntax-ns#"); + addPrefix("rdfs","http://www.w3.org/2000/01/rdf-schema#"); + addPrefix("xsd","http://www.w3.org/2001/XMLSchema#"); + } + + public void addPrefix(String prefix, String namespace) { + prefixes.put(prefix, namespace); + } + + public class Execute extends AbsHelper { + + + private static final String FILL_IN_URI = "http://please-do-not-user-relative-uris/"; + + + private TupleQueryResult executeSelect(String sparql) throws QueryEvaluationException, MalformedQueryException { + String prefixes = sparqlPrefixes(); + astContainer = new Bigdata2ASTSPARQLParser(store).parseQuery2(prefixes+sparql, FILL_IN_URI); + return ASTEvalHelper.evaluateTupleQuery(store, astContainer, new QueryBindingSet()); + } + + /** + * + * See http://stackoverflow.com/a/7716231/2276263 + * @param testURI + * @throws Exception + * + */ + public Execute(final String queryStr) throws Exception { + super(queryStr); + + + + if (log.isInfoEnabled()) + log.info("\nquery:\n" + queryStr); + } + + /** + * Load some RDF data. + * + * @param data + * The in-line data to be loaded. + * + * @return The #of statements parsed from the source. If there are + * duplicate told statements, then there may be fewer statements + * written onto the KB. + * @throws UnsupportedEncodingException + */ + private long loadData(String data) throws UnsupportedEncodingException { + + if (data == null || data.length() == 0) { + return 0; + } + + data = ttlPrefixes() + data; + + byte utf8Data[] = data.getBytes("utf-8"); + final InputStream is = new ByteArrayInputStream(utf8Data); + String uri = FILL_IN_URI; + + RDFFormat format = RDFFormat.TRIG; + return loadData(is, format, uri); + + } + + public void expectResultSet(String vars, String ... 
bindings) throws Exception { + final TupleQueryResult expectedResult = expectedTupleQueryResult(vars, bindings); + + loadData(trigData()); + + final TupleQueryResult queryResult = executeSelect(queryStr); + compareTupleQueryResults(queryResult, expectedResult, false); + } + + private TupleQueryResult expectedTupleQueryResult(String vars, String ...bindings ) throws QueryEvaluationException, MalformedQueryException { + StringBuilder sb = new StringBuilder(); + sb.append("SELECT "); + sb.append(vars); + sb.append("\n{} BINDINGS "); + sb.append(vars); + sb.append("{\n"); + for (String binding:bindings) { + sb.append(" ( "); + sb.append(binding); + sb.append(" )\n"); + } + sb.append("}\n"); + return executeSelect( sb.toString()); + } + + protected String trigData() { + return AbstractInlineSELECTTestCase.this.trigData(); + } + + } + + protected abstract String trigData(); + + public String sparqlPrefixes() { + StringBuilder sb = new StringBuilder(); + for (Map.Entry<String, String> entry: prefixes.entrySet()) { + sb.append("PREFIX "); + sb.append(entry.getKey()); + sb.append(": <"); + sb.append(entry.getValue()); + sb.append(">\n"); + } + sb.append("\n"); + return sb.toString(); + } + + public String ttlPrefixes() { + StringBuilder sb = new StringBuilder(); + for (Map.Entry<String, String> entry: prefixes.entrySet()) { + sb.append("@prefix "); + sb.append(entry.getKey()); + sb.append(": <"); + sb.append(entry.getValue()); + sb.append(">.\n"); + } + sb.append("\n"); + return sb.toString(); + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2013-11-20 22:41:35 UTC (rev 7576) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -26,7 +26,6 @@ import com.bigdata.rdf.sparql.ast.QueryHints; import com.bigdata.rdf.sparql.ast.eval.reif.TestReificationDoneRightEval; -import junit.extensions.TestSetup; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; @@ -56,7 +55,7 @@ * Returns a test that will run each of the implementation specific test * suites in turn. */ - private static TestSuite coreSuite() + public static Test suite() { final TestSuite suite = new TestSuite("AST Evaluation"); @@ -182,31 +181,4 @@ } - - /** - * Returns a test that will run each of the implementation specific test - * suites in turn. 
- */ - public static Test suite() - { - - final TestSuite suite = new TestSuite("AST Evaluation (all)"); - final TestSuite tHash = coreSuite(); - tHash.setName("AST Evaluation (tHash)"); - suite.addTest(new TestSetup(tHash) { - - protected void setUp() throws Exception { - AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS = true; - } - protected void tearDown() throws Exception { - AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS = false; - } - - }); - final TestSuite jvmHash = coreSuite(); - jvmHash.setName("AST Evaluation (jvmHash)"); - suite.addTest(jvmHash); - return suite; - } - } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java 2013-11-20 22:41:35 UTC (rev 7576) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -127,7 +127,6 @@ "include_01.rq",// query URL "include_01.trig",// data URL "include_01.srx",// results URL - false,// lax cardinality true // check order(!) ); @@ -238,7 +237,6 @@ "include_02.rq",// query URL "include_02.trig",// data URL "include_02.srx",// results URL - false,// lax cardinality true // check order(!) ); @@ -372,7 +370,6 @@ "include_03a.rq",// query URL "include_03.trig",// data URL "include_03.srx",// results URL - false,// lax cardinality false // check order ); @@ -519,7 +516,6 @@ "include_03.rq",// query URL "include_03.trig",// data URL "include_03.srx",// results URL - false,// lax cardinality false // check order ); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTCK.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTCK.java 2013-11-20 22:41:35 UTC (rev 7576) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTCK.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -757,7 +757,6 @@ "sparql11-order-02.rq",// queryFileURL "sparql11-order-02.ttl",// dataFileURL "sparql11-order-02.srx"// resultFileURL - ,false// laxCardinality ,true// checkOrder ).runTest(); @@ -782,7 +781,6 @@ "sparql11-order-02-workaround.rq",// queryFileURL "sparql11-order-02.ttl",// dataFileURL "sparql11-order-02.srx"// resultFileURL - ,false// laxCardinality ,true// checkOrder ).runTest(); @@ -806,7 +804,6 @@ "sparql11-order-03.rq",// queryFileURL "sparql11-order-03.ttl",// dataFileURL "sparql11-order-03.srx"// resultFileURL - ,false// laxCardinality ,true// checkOrder ).runTest(); @@ -859,7 +856,6 @@ "join_with_no_shared_variables.rq",// queryFileURL "join_with_no_shared_variables.ttl",// dataFileURL "join_with_no_shared_variables.srx"// resultFileURL - ,false// laxCardinality ,false// checkOrder ).runTest(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java 2013-11-20 22:41:35 UTC (rev 7576) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -27,8 +27,6 @@ package com.bigdata.rdf.sparql.ast.eval; -import com.bigdata.BigdataStatics; - /** * Test suite for tickets at 
<href a="http://sourceforge.net/apps/trac/bigdata"> * trac </a>. @@ -180,11 +178,6 @@ } public void test_ticket_748() throws Exception { - // Concerning omitting the test with hash joins, see Trac776 and - // com.bigdata.rdf.internal.encoder.AbstractBindingSetEncoderTestCase.test_solutionWithOneMockIV() - - if(AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS && !BigdataStatics.runKnownBadTests) - return; new TestHelper("ticket748-subselect",// testURI, "ticket748-subselect.rq",// queryFileURL @@ -197,9 +190,6 @@ public void test_ticket_748a() throws Exception { - if(AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS && !BigdataStatics.runKnownBadTests) - return; - new TestHelper("ticket748A-subselect",// testURI, "ticket748A-subselect.rq",// queryFileURL "ticket748-subselect.ttl",// dataFileURL @@ -210,9 +200,6 @@ public void test_ticket_two_subselects_748() throws Exception { - if(AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS && !BigdataStatics.runKnownBadTests) - return; - new TestHelper("ticket748-two-subselects",// testURI, "ticket748-two-subselects.rq",// queryFileURL "ticket748-two-subselects.ttl",// dataFileURL @@ -224,9 +211,6 @@ public void test_ticket_two_subselects_748a() throws Exception { - if(AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS && !BigdataStatics.runKnownBadTests) - return; - new TestHelper("ticket748A-two-subselects",// testURI, "ticket748A-two-subselects.rq",// queryFileURL "ticket748-two-subselects.ttl",// dataFileURL @@ -263,7 +247,6 @@ "ticket563-DistinctOrderBy.rq",// queryFileURL "ticket563-DistinctOrderBy.n3",// dataFileURL "ticket563-DistinctOrderBy.srx",// resultFileURL - false, // laxCardinality true // checkOrder ).runTest(); @@ -276,7 +259,6 @@ "aggregate-min.rq",// queryFileURL "aggregate-min-max.ttl",// dataFileURL "aggregate-min.srx",// resultFileURL - false, // laxCardinality true // checkOrder ).runTest(); @@ -288,7 +270,6 @@ "aggregate-max.rq",// queryFileURL "aggregate-min-max.ttl",// dataFileURL "aggregate-max.srx",// resultFileURL - false, // laxCardinality true // checkOrder ).runTest(); @@ -300,7 +281,6 @@ "aggregate-min1.rq",// queryFileURL "aggregate-min-max.ttl",// dataFileURL "aggregate-min1.srx",// resultFileURL - false, // laxCardinality true // checkOrder ).runTest(); @@ -312,7 +292,6 @@ "aggregate-max1.rq",// queryFileURL "aggregate-min-max.ttl",// dataFileURL "aggregate-max1.srx",// resultFileURL - false, // laxCardinality true // checkOrder ).runTest(); @@ -324,7 +303,6 @@ "aggregate-min2.rq",// queryFileURL "aggregate-min-max.ttl",// dataFileURL "aggregate-min2.srx",// resultFileURL - false, // laxCardinality true // checkOrder ).runTest(); @@ -336,7 +314,6 @@ "aggregate-max2.rq",// queryFileURL "aggregate-min-max.ttl",// dataFileURL "aggregate-max2.srx",// resultFileURL - false, // laxCardinality true // checkOrder ).runTest(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestUnionMinus.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestUnionMinus.java 2013-11-20 22:41:35 UTC (rev 7576) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestUnionMinus.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -27,12 +27,14 @@ package com.bigdata.rdf.sparql.ast.eval; +import com.bigdata.BigdataStatics; + /** * Test suite for UNION and MINUS combined, see * https://sourceforge.net/apps/trac/bigdata/ticket/767 * */ -public class TestUnionMinus extends 
AbstractDataDrivenSPARQLTestCase { +public class TestUnionMinus extends AbstractInlineSELECTTestCase { /** * @@ -47,39 +49,179 @@ super(name); } - /** -SELECT ?s -WHERE { - { - BIND ( :bob as ?s ) - } UNION { - } - MINUS { - BIND ( :bob as ?s ) - } -} LIMIT 10 - */ + @Override + public String trigData() { + return ""; + } + + public void test_union_minus_01() throws Exception { + // Concerning omitting the test with hash joins, see Trac776 and + // com.bigdata.rdf.internal.encoder.AbstractBindingSetEncoderTestCase.test_solutionWithOneMockIV() + + new Execute( + "SELECT ?s \r\n" + + "WHERE { \r\n" + + " { \r\n" + + " BIND ( :bob as ?s ) \r\n" + + " } UNION { \r\n" + + " } \r\n" + + " MINUS { \r\n" + + " BIND ( :bob as ?s ) \r\n" + + " } \r\n" + + "}").expectResultSet("?s","UNDEF"); + + } - new TestHelper("union_minus_01").runTest(); + + public void test_union_minus_02() throws Exception { + + new Execute( + "SELECT ?s\r\n" + + "WHERE {\r\n" + + " { \r\n" + + " BIND ( :bob as ?s )\r\n" + + " } UNION {\r\n" + + " }\r\n" + + " FILTER (!BOUND(?s) || ?s != :bob)\r\n" + + "}").expectResultSet("?s","UNDEF"); } + public void test_union_minus_03() throws Exception { - /** -SELECT ?s -WHERE { - { - BIND ( :bob as ?s ) - } UNION { - } - FILTER (!BOUND(?s) || ?s != :bob) -} + new Execute( + "SELECT ?s \r\n" + + "WHERE { \r\n" + + " { \r\n" + + " BIND ( 2 as ?s ) \r\n" + + " } UNION { \r\n" + + " } \r\n" + + " MINUS { \r\n" + + " BIND ( 2 as ?s ) \r\n" + + " } \r\n" + + "}").expectResultSet("?s","UNDEF"); + + } + public void test_union_minus_04() throws Exception { - */ - public void test_union_minus_02() throws Exception { + new Execute( + "SELECT ?x \r\n" + + "WHERE { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " { BIND ( 4 as ?x ) \r\n" + + " } UNION { \r\n" + + " MINUS { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " } \r\n" + + " BIND (3 as ?x) \r\n" + + " } \r\n" + + "}").expectResultSet("?x","3"); + + } + public void test_union_minus_05() throws Exception { - new TestHelper("union_minus_02").runTest(); + new Execute( + "SELECT ?x \r\n" + + "WHERE { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " { BIND ( 4 as ?x ) \r\n" + + " } UNION { \r\n" + + " MINUS { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " } \r\n" + + " } \r\n" + + "}").expectResultSet("?x","3"); } + public void test_union_minus_06() throws Exception { + + new Execute( + "SELECT ?x \r\n" + + "WHERE { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " { BIND ( 4 as ?x ) \r\n" + + " } UNION { \r\n" + + " BIND (3 as ?x) \r\n" + + " MINUS { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " } \r\n" + + " } \r\n" + + "}").expectResultSet("?x"); + + } + + public void test_union_minus_07() throws Exception { + + new Execute( + "SELECT ?x \r\n" + + "WHERE { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " { BIND ( 4 as ?x ) \r\n" + + " } UNION { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " MINUS { \r\n" + + " { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " } UNION { \r\n" + + " BIND ( 4 as ?y ) \r\n" + + " } \r\n" + + " } \r\n" + + " } \r\n" + + "}").expectResultSet("?x"); + + } + + public void test_union_minus_08() throws Exception { + + new Execute( + "SELECT ?x \r\n" + + "WHERE { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " { BIND ( 4 as ?x ) \r\n" + + " } UNION { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " MINUS { \r\n" + + " { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " } UNION { \r\n" + + " } \r\n" + + " } \r\n" + + " } \r\n" + + "}").expectResultSet("?x"); + + } + + public void test_union_minus_09() throws Exception { + + new Execute( + "SELECT ?x \r\n" + + "WHERE { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + 
" { BIND ( 4 as ?x ) \r\n" + + " } UNION { \r\n" + + " BIND ( 3 as ?x ) \r\n" + + " MINUS { \r\n" + + " } \r\n" + + " } \r\n" + + "}").expectResultSet("?x","3"); + + } + + public void test_union_minus_10() throws Exception { + + new Execute( + "SELECT ?x \r\n" + + "WHERE { \r\n" + + " { BIND ( 3 as ?x ) } \r\n" + + " UNION \r\n" + + " { BIND ( 4 as ?y ) } \r\n" + + " MINUS { \r\n" + + " { BIND ( 3 as ?x ) } \r\n" + + " UNION \r\n" + + " { BIND ( 4 as ?y ) } \r\n" + + " } \r\n" + + "}").expectResultSet("?x","3"); + + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestServiceRegistry.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestServiceRegistry.java 2013-11-20 22:41:35 UTC (rev 7576) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestServiceRegistry.java 2013-11-21 16:48:15 UTC (rev 7577) @@ -300,7 +300,6 @@ // De-register alias ServiceRegistry.getInstance().remove(serviceURI1); - ServiceRegistry.getInstance().remove(serviceURI2); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |