From: <mar...@us...> - 2014-04-02 13:13:00
Revision: 8026 http://sourceforge.net/p/bigdata/code/8026 Author: martyncutcher Date: 2014-04-02 13:12:56 +0000 (Wed, 02 Apr 2014) Log Message: ----------- Initial commit on creation of HA1_HA5 branch Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java Added Paths: ----------- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-D.properties branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-E.properties branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config branches/BIGDATA_MGC_HA1_HA5/branch-notes.txt Removed Paths: ------------- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-02 13:03:59 UTC (rev 8025) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-02 13:12:56 UTC (rev 8026) @@ -3428,8 +3428,8 @@ if (quorum == null) return; - if (!quorum.isHighlyAvailable()) - return; +// if (!quorum.isHighlyAvailable()) +// return; /** * CRITICAL SECTION. 
We need obtain a distributed consensus for the Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-02 13:03:59 UTC (rev 8025) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-02 13:12:56 UTC (rev 8026) @@ -4574,7 +4574,7 @@ // } - log.warn("Starting NSS"); + log.warn("Starting NSS from " + jettyXml); // Start the server. jettyServer.start(); @@ -4658,8 +4658,9 @@ if (tmp == null) throw new IllegalStateException("Server is not running"); - return tmp.getConnectors()[0].getLocalPort(); - + final int port = tmp.getConnectors()[0].getLocalPort(); + haLog.warn("Returning NSSPort: " + port); + return port; } /** Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-04-02 13:03:59 UTC (rev 8025) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-04-02 13:12:56 UTC (rev 8026) @@ -1096,6 +1096,8 @@ + haLogBytesOnDisk// + ", journalSize=" + journalSize// + + ", thresholdPercentLogSize=" + + thresholdPercentLogSize// + ", percentLogSize=" + actualPercentLogSize// + "%, takeSnapshot=" + takeSnapshot // Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-02 13:03:59 UTC (rev 8025) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-02 13:12:56 UTC (rev 8026) @@ -133,7 +133,7 @@ * Implementation listens for the death of the child process and can be used * to decide when the child process is no longer executing. */ - private static class ServiceListener implements IServiceListener { + public static class ServiceListener implements IServiceListener { private volatile HAGlue haGlue; private volatile ProcessHelper processHelper; @@ -218,8 +218,12 @@ * The {@link Remote} interfaces for these services (if started and * successfully discovered). */ - private HAGlue serverA = null, serverB = null, serverC = null; + protected HAGlue serverA = null; + protected HAGlue serverB = null; + + protected HAGlue serverC = null; + /** * {@link UUID}s for the {@link HAJournalServer}s. */ @@ -232,17 +236,20 @@ * @see <a href="http://trac.bigdata.com/ticket/730" > Allow configuration * of embedded NSS jetty server using jetty-web.xml </a> */ - private final int A_JETTY_PORT = 8090, B_JETTY_PORT = A_JETTY_PORT + 1, - C_JETTY_PORT = B_JETTY_PORT + 1; + protected final int A_JETTY_PORT = 8090; + protected final int B_JETTY_PORT = A_JETTY_PORT + 1; + protected final int C_JETTY_PORT = B_JETTY_PORT + 1; /** * These {@link IServiceListener}s are used to reliably detect that the * corresponding process starts and (most importantly) that it is really * dies once it has been shutdown or destroyed. 
*/ - private ServiceListener serviceListenerA = null, serviceListenerB = null; + protected ServiceListener serviceListenerA = null; - private ServiceListener serviceListenerC = null; + protected ServiceListener serviceListenerB = null; + + protected ServiceListener serviceListenerC = null; private LookupDiscoveryManager lookupDiscoveryManager = null; @@ -1143,14 +1150,14 @@ } - private void safeShutdown(final HAGlue haGlue, final File serviceDir, + void safeShutdown(final HAGlue haGlue, final File serviceDir, final ServiceListener serviceListener) { safeShutdown(haGlue, serviceDir, serviceListener, false/* now */); } - private void safeShutdown(final HAGlue haGlue, final File serviceDir, + protected void safeShutdown(final HAGlue haGlue, final File serviceDir, final ServiceListener serviceListener, final boolean now) { if (haGlue == null) @@ -1368,6 +1375,10 @@ } + protected String getZKConfigFile() { + return "zkClient.config"; + } + /** * Return Zookeeper quorum that can be used to reflect (or act on) the * distributed quorum state for the logical service. @@ -1382,7 +1393,7 @@ KeeperException, IOException { final Configuration config = ConfigurationProvider - .getInstance(new String[] { SRC_PATH + "zkClient.config" }); + .getInstance(new String[] { SRC_PATH + getZKConfigFile() }); zkClientConfig = new ZookeeperClientConfig(config); @@ -1551,7 +1562,7 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ - abstract private class StartServerTask implements Callable<HAGlue> { + public abstract class StartServerTask implements Callable<HAGlue> { private final String name; private final String configName; Added: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java (rev 0) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java 2014-04-02 13:12:56 UTC (rev 8026) @@ -0,0 +1,466 @@ +package com.bigdata.journal.jini.ha; + +import java.io.File; +import java.io.IOException; +import java.rmi.Remote; +import java.security.DigestException; +import java.security.NoSuchAlgorithmException; +import java.util.LinkedList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import com.bigdata.ha.HAGlue; +import com.bigdata.jini.start.IServiceListener; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownATask; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownBTask; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownCTask; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownTask; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ServiceListener; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartATask; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartBTask; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartCTask; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartServerTask; +import com.bigdata.quorum.AsynchronousQuorumCloseException; + +public 
class AbstractHA5JournalServerTestCase extends + AbstractHA3JournalServerTestCase { + + /** + * The {@link Remote} interfaces for these services (if started and + * successfully discovered). + */ + protected HAGlue serverD = null; + + protected HAGlue serverE = null; + + /** + * {@link UUID}s for the {@link HAJournalServer}s. + */ + private UUID serverDId = UUID.randomUUID(); + private UUID serverEId = UUID.randomUUID(); + + /** + * The HTTP ports at which the services will respond. + * + * @see <a href="http://trac.bigdata.com/ticket/730" > Allow configuration + * of embedded NSS jetty server using jetty-web.xml </a> + */ + protected final int D_JETTY_PORT = C_JETTY_PORT + 1; + protected final int E_JETTY_PORT = D_JETTY_PORT + 1; + + protected String getZKConfigFile() { + return "zkClient5.config"; // 5 stage pipeline + } + + /** + * These {@link IServiceListener}s are used to reliably detect that the + * corresponding process starts and (most importantly) that it is really + * dies once it has been shutdown or destroyed. + */ + protected ServiceListener serviceListenerD = null, serviceListenerE = null; + + protected File getServiceDirD() { + return new File(getTestDir(), "D"); + } + + protected File getServiceDirE() { + return new File(getTestDir(), "E"); + } + + protected File getHAJournalFileD() { + return new File(getServiceDirD(), "bigdata-ha.jnl"); + } + + protected File getHAJournalFileE() { + return new File(getServiceDirE(), "bigdata-ha.jnl"); + } + + protected File getHALogDirD() { + return new File(getServiceDirD(), "HALog"); + } + + protected File getHALogDirE() { + return new File(getServiceDirE(), "HALog"); + } + + /** + * Start A then B then C. As each service starts, this method waits for that + * service to appear in the pipeline in the proper position. + * + * @return The ordered array of services <code>[A, B, C]</code> + */ + protected HAGlue[] startSequenceABCDE() throws Exception { + + startA(); + awaitPipeline(new HAGlue[] { serverA }); + + startB(); + awaitPipeline(new HAGlue[] { serverA, serverB }); + + startC(); + awaitPipeline(new HAGlue[] { serverA, serverB, serverC }); + + startD(); + awaitPipeline(new HAGlue[] { serverA, serverB, serverC, serverD }); + + startE(); + awaitPipeline(new HAGlue[] { serverA, serverB, serverC, serverD, serverE }); + + return new HAGlue[] { serverA, serverB, serverC, serverD, serverE }; + + } + + /** + * Helper class for simultaneous/seqeunced start of 3 HA services. + */ + protected class ABCDE { + + /** + * The services. + */ + final HAGlue serverA, serverB, serverC, serverD, serverE; + + /** + * Start of 3 HA services (this happens in the ctor). + * + * @param sequential + * True if the startup should be sequential or false + * if services should start concurrently. + * @throws Exception + */ + public ABCDE(final boolean sequential) + throws Exception { + + this(true/* sequential */, true/* newServiceStarts */); + + } + + /** + * Start of 3 HA services (this happens in the ctor). + * + * @param sequential + * True if the startup should be sequential or false if + * services should start concurrently. + * @param newServiceStarts + * When <code>true</code> the services are new, the database + * should be at <code>commitCounter:=0</code> and the + * constructor will check for the implicit create of the + * default KB. 
+ * @throws Exception + */ + public ABCDE(final boolean sequential, final boolean newServiceStarts) + throws Exception { + + if (sequential) { + + final HAGlue[] services = startSequenceABCDE(); + + serverA = services[0]; + + serverB = services[1]; + + serverC = services[2]; + + serverD = services[3]; + + serverE = services[4]; + + } else { + + final List<Callable<HAGlue>> tasks = new LinkedList<Callable<HAGlue>>(); + + tasks.add(new StartATask(false/* restart */)); + tasks.add(new StartBTask(false/* restart */)); + tasks.add(new StartCTask(false/* restart */)); + tasks.add(new StartDTask(false/* restart */)); + tasks.add(new StartETask(false/* restart */)); + + // Start all servers in parallel. Wait up to a timeout. + final List<Future<HAGlue>> futures = executorService.invokeAll( + tasks, 30/* timeout */, TimeUnit.SECONDS); + + serverA = futures.get(0).get(); + + serverB = futures.get(1).get(); + + serverC = futures.get(2).get(); + + serverD = futures.get(3).get(); + + serverE = futures.get(4).get(); + + } + + // wait for the quorum to fully meet. + awaitFullyMetQuorum(); + + if(newServiceStarts) { + // wait for the initial commit point (KB create). + awaitCommitCounter(1L, serverA, serverB, serverC, serverD, serverE); + } + + } + + public void shutdownAll() throws InterruptedException, + ExecutionException { + + shutdownAll(false/* now */); + + } + + public void shutdownAll(final boolean now) throws InterruptedException, + ExecutionException { + + final List<Callable<Void>> tasks = new LinkedList<Callable<Void>>(); + + tasks.add(new SafeShutdownATask()); + tasks.add(new SafeShutdownBTask()); + tasks.add(new SafeShutdownCTask()); + tasks.add(new SafeShutdownDTask()); + tasks.add(new SafeShutdownETask()); + + // Start all servers in parallel. Wait up to a timeout. 
+ final List<Future<Void>> futures = executorService.invokeAll( + tasks, 30/* timeout */, TimeUnit.SECONDS); + + futures.get(0).get(); + futures.get(1).get(); + futures.get(2).get(); + futures.get(3).get(); + futures.get(4).get(); + + } + + public void assertDigestsEqual() throws NoSuchAlgorithmException, DigestException, IOException { + assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC, serverD, serverE }); + } + + } + + protected HAGlue startD() throws Exception { + + return new StartDTask(false/* restart */).call(); + + } + + protected HAGlue startE() throws Exception { + + return new StartETask(false/* restart */).call(); + + } + + protected HAGlue restartD() throws Exception { + + return new StartDTask(true/* restart */).call(); + + } + + protected HAGlue restartE() throws Exception { + + return new StartETask(true/* restart */).call(); + + } + + protected void shutdownD() throws IOException { + safeShutdown(serverD, getServiceDirD(), serviceListenerD, true); + + serverD = null; + serviceListenerD = null; + } + + protected void shutdownE() throws IOException { + safeShutdown(serverE, getServiceDirE(), serviceListenerE, true); + + serverE = null; + serviceListenerE = null; + } + + protected class StartDTask extends StartServerTask { + + public StartDTask(final boolean restart) { + + super("D", "HAJournal-D.config", serverDId, D_JETTY_PORT, + serviceListenerD = new ServiceListener(), restart); + + } + + @Override + public HAGlue call() throws Exception { + + if (restart) { + + safeShutdown(serverD, getServiceDirD(), serviceListenerD); + + serverD = null; + + } + + return serverD = start(); + + } + + } + + protected class StartETask extends StartServerTask { + + public StartETask(final boolean restart) { + + super("E", "HAJournal-E.config", serverEId, E_JETTY_PORT, + serviceListenerE = new ServiceListener(), restart); + + } + + @Override + public HAGlue call() throws Exception { + + if (restart) { + + safeShutdown(serverE, getServiceDirE(), serviceListenerE); + + serverE = null; + + } + + return serverE = start(); + + } + + } + + protected class SafeShutdownDTask extends SafeShutdownTask { + + public SafeShutdownDTask() { + this(false/* now */); + } + + public SafeShutdownDTask(final boolean now) { + super(serverD, getServiceDirC(), serviceListenerD, now); + } + + } + + protected class SafeShutdownETask extends SafeShutdownTask { + + public SafeShutdownETask() { + this(false/* now */); + } + + public SafeShutdownETask(final boolean now) { + super(serverE, getServiceDirC(), serviceListenerE, now); + } + + } + public AbstractHA5JournalServerTestCase() { + } + + public AbstractHA5JournalServerTestCase(final String name) { + super(name); + } + + protected void destroyAll() throws AsynchronousQuorumCloseException, + InterruptedException, TimeoutException { + /** + * The most reliable tear down is in reverse pipeline order. + * + * This may not be necessary long term but for now we want to avoid + * destroying the leader first since it can lead to problems as + * followers attempt to reform + */ + final HAGlue leader; + final File leaderServiceDir; + final ServiceListener leaderListener; + if (quorum.isQuorumMet()) { + final long token = quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); + /* + * Note: It is possible to resolve a proxy for a service that has + * been recently shutdown or destroyed. This is effectively a data + * race. 
+ */ + final HAGlue t = quorum.getClient().getLeader(token); + if (t.equals(serverA)) { + leader = t; + leaderServiceDir = getServiceDirA(); + leaderListener = serviceListenerA; + } else if (t.equals(serverB)) { + leader = t; + leaderServiceDir = getServiceDirB(); + leaderListener = serviceListenerB; + } else if (t.equals(serverC)) { + leader = t; + leaderServiceDir = getServiceDirC(); + leaderListener = serviceListenerC; + } else if (t.equals(serverD)) { + leader = t; + leaderServiceDir = getServiceDirD(); + leaderListener = serviceListenerD; + } else if (t.equals(serverE)) { + leader = t; + leaderServiceDir = getServiceDirE(); + leaderListener = serviceListenerE; + } else { + if (serverA == null && serverB == null && serverC == null && serverD == null && serverE == null) { + /* + * There are no services running and nothing to shutdown. We + * probably resolved a stale proxy to the leader above. + */ + return; + } + throw new IllegalStateException( + "Leader is none of A, B, or C: leader=" + t + ", A=" + + serverA + ", B=" + serverB + ", C=" + serverC); + } + } else { + leader = null; + leaderServiceDir = null; + leaderListener = null; + } + + if (leader == null || !leader.equals(serverA)) { + destroyA(); + } + + if (leader == null || !leader.equals(serverB)) { + destroyB(); + } + + if (leader == null || !leader.equals(serverC)) { + destroyC(); + } + + if (leader == null || !leader.equals(serverD)) { + destroyD(); + } + + if (leader == null || !leader.equals(serverE)) { + destroyE(); + } + + // Destroy leader last + if (leader != null) { + safeDestroy(leader, leaderServiceDir, leaderListener); + + serverA = serverB = serverC = serverD = serverE = null; + serviceListenerA = serviceListenerC = serviceListenerB = serviceListenerD = serviceListenerE = null; + } + + } + + protected void destroyD() { + safeDestroy(serverD, getServiceDirD(), serviceListenerD); + serverD = null; + serviceListenerD = null; + } + + protected void destroyE() { + safeDestroy(serverE, getServiceDirE(), serviceListenerE); + serverE = null; + serviceListenerE = null; + } + +} Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-02 13:03:59 UTC (rev 8025) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-02 13:12:56 UTC (rev 8026) @@ -440,8 +440,17 @@ * Wait for the service to report that it is ready as a leader or * follower. */ - haGlue.awaitHAReady(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + return awaitNSSAndHAReady(haGlue, awaitQuorumTimeout, TimeUnit.MILLISECONDS); + } + + protected HAStatusEnum awaitNSSAndHAReady(final HAGlue haGlue, long timout, TimeUnit unit) + throws Exception { /* + * Wait for the service to report that it is ready as a leader or + * follower. + */ + haGlue.awaitHAReady(timout, unit); + /* * Wait for the NSS to report the status of the service (this verifies * that the NSS interface is running). 
*/ @@ -525,7 +534,9 @@ protected String getNanoSparqlServerURL(final HAGlue haGlue) throws IOException { - return "http://localhost:" + haGlue.getNSSPort(); + final int port = haGlue.getNSSPort(); + + return "http://localhost:" + port; } @@ -541,7 +552,7 @@ final String sparqlEndpointURL = getNanoSparqlServerURL(haGlue) + "/sparql"; - + // Client for talking to the NSS. final HttpClient httpClient = new DefaultHttpClient(ccm); @@ -1248,4 +1259,35 @@ } + /** + * The effective name for this test as used to name the directories in which + * we store things. + * + * TODO If there are method name collisions across the different test + * classes then the test suite name can be added to this. Also, if there are + * file naming problems, then this value can be munged before it is + * returned. + */ + private final String effectiveTestFileName = getClass().getSimpleName() + + "." + getName(); + + /** + * The directory that is the parent of each {@link HAJournalServer}'s + * individual service directory. + */ + protected File getTestDir() { + return new File(TGT_PATH, getEffectiveTestFileName()); + } + + /** + * The effective name for this test as used to name the directories in which + * we store things. + */ + protected String getEffectiveTestFileName() { + + return effectiveTestFileName; + + } + + } Added: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config (rev 0) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config 2014-04-02 13:12:56 UTC (rev 8026) @@ -0,0 +1,288 @@ +import net.jini.jeri.BasicILFactory; +import net.jini.jeri.BasicJeriExporter; +import net.jini.jeri.tcp.TcpServerEndpoint; + +import net.jini.discovery.LookupDiscovery; +import net.jini.core.discovery.LookupLocator; +import net.jini.core.entry.Entry; +import net.jini.lookup.entry.Name; +import net.jini.lookup.entry.Comment; +import net.jini.lookup.entry.Address; +import net.jini.lookup.entry.Location; +import net.jini.lookup.entry.ServiceInfo; +import net.jini.core.lookup.ServiceTemplate; + +import java.io.File; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.UUID; + +import com.bigdata.util.NV; +import com.bigdata.util.config.NicUtil; +import com.bigdata.journal.Options; +import com.bigdata.journal.BufferMode; +import com.bigdata.journal.jini.ha.HAJournal; +import com.bigdata.jini.lookup.entry.*; +import com.bigdata.service.IBigdataClient; +import com.bigdata.service.AbstractTransactionService; +import com.bigdata.service.jini.*; +import com.bigdata.service.jini.lookup.DataServiceFilter; +import com.bigdata.service.jini.master.ServicesTemplate; +import com.bigdata.jini.start.config.*; +import com.bigdata.jini.util.ConfigMath; + +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Id; + +// imports for various options. 
+import com.bigdata.btree.IndexMetadata; +import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.spo.SPORelation; +import com.bigdata.rdf.spo.SPOKeyOrder; +import com.bigdata.rdf.lexicon.LexiconRelation; +import com.bigdata.rdf.lexicon.LexiconKeyOrder; +import com.bigdata.rawstore.Bytes; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeUnit.*; + +/* + * This is a sample configuration file for a highly available Journal. A + * version of this file must be available to each HAJournalServer in the + * pipeline. + */ + +/* + * Globals. + */ +bigdata { + + private static fedname = "benchmark"; + + // NanoSparqlServer (http) port. + private static nssPort = ConfigMath.add(8090,3); + + // write replication pipeline port (listener). + private static haPort = ConfigMath.add(9090,3); + + // The #of services in the write pipeline. + private static replicationFactor = 5; + + // The logical service identifier shared by all members of the quorum. + private static logicalServiceId = System.getProperty("test.logicalServiceId","CI-HAJournal-1"); + + // The service directory. + // Note: Overridden by environment property when deployed. + private static serviceDir = new File(System.getProperty("test.serviceDir",ConfigMath.getAbsolutePath(new File(new File(fedname,logicalServiceId),"D")))); + //new File(new File(fedname,logicalServiceId),"D"); + + // journal data directory. + private static dataDir = serviceDir; + + // one federation, multicast discovery. + //static private groups = LookupDiscovery.ALL_GROUPS; + + // unicast discovery or multiple setups, MUST specify groups. + static private groups = new String[]{bigdata.fedname}; + + /** + * One or more unicast URIs of the form <code>jini://host/</code> + * or <code>jini://host:port/</code> (no default). + * + * This MAY be an empty array if you want to use multicast + * discovery <strong>and</strong> you have specified the groups as + * LookupDiscovery.ALL_GROUPS (a <code>null</code>). + */ + static private locators = new LookupLocator[] { + + // runs jini on the localhost using unicast locators. + new LookupLocator("jini://localhost/") + + }; + + /** + * A common point to set the Zookeeper client's requested + * sessionTimeout and the jini lease timeout. The default lease + * renewal period for jini is 5 minutes while for zookeeper it is + * more like 5 seconds. This puts the two systems onto a similar + * timeout period so that a disconnected client is more likely to + * be noticed in roughly the same period of time for either + * system. A value larger than the zookeeper default helps to + * prevent client disconnects under sustained heavy load. + * + * If you use a short lease timeout (LT 20s), then you need to override + * properties properties for the net.jini.lease.LeaseRenewalManager + * or it will run in a tight loop (it's default roundTripTime is 10s + * and it schedules lease renewals proactively.) + */ + + // jini + static private leaseTimeout = ConfigMath.s2ms(20); + + // zookeeper + static private sessionTimeout = (int)ConfigMath.s2ms(20); + + /* + * Configuration for default KB. + */ + + private static namespace = "kb"; + + private static kb = new NV[] { + + /* Setup for QUADS mode without the full text index. 
*/ + + new NV(BigdataSail.Options.TRUTH_MAINTENANCE, "false" ), + new NV(BigdataSail.Options.QUADS, "true"), + new NV(BigdataSail.Options.STATEMENT_IDENTIFIERS, "false"), + new NV(BigdataSail.Options.TEXT_INDEX, "false"), + new NV(BigdataSail.Options.AXIOMS_CLASS,"com.bigdata.rdf.axioms.NoAxioms"), + new NV(BigdataSail.Options.QUERY_TIME_EXPANDER, "false"), + + // Bump up the branching factor for the lexicon indices on the named kb. + // com.bigdata.namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor=400 + new NV(com.bigdata.config.Configuration.getOverrideProperty + ( namespace + "." + LexiconRelation.NAME_LEXICON_RELATION, + IndexMetadata.Options.BTREE_BRANCHING_FACTOR + ), "400"), + + // Bump up the branching factor for the statement indices on the named kb. + // com.bigdata.namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor=1024 + new NV(com.bigdata.config.Configuration.getOverrideProperty + ( namespace + "." + SPORelation.NAME_SPO_RELATION, + IndexMetadata.Options.BTREE_BRANCHING_FACTOR + ), "1024"), + }; + +} + +/* + * Zookeeper client configuration. + */ +org.apache.zookeeper.ZooKeeper { + + /* Root znode for the federation instance. */ + zroot = "/" + bigdata.fedname; + + /* A comma separated list of host:port pairs, where the port is + * the CLIENT port for the zookeeper server instance. + */ + // standalone. + servers = "localhost:2081"; + + /* Session timeout (optional). */ + sessionTimeout = bigdata.sessionTimeout; + + /* + * ACL for the zookeeper nodes created by the bigdata federation. + * + * Note: zookeeper ACLs are not transmitted over secure channels + * and are placed into plain text Configuration files by the + * ServicesManagerServer. + */ + acl = new ACL[] { + + new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone")) + + }; +} + +/* + * You should not have to edit below this line. + */ + +/* + * Jini client configuration. + */ +com.bigdata.service.jini.JiniClient { + + groups = bigdata.groups; + + locators = bigdata.locators; + + entries = new Entry[] { + + // Optional metadata entries. + new Name("D"), + + // Note: Used to assign the ServiceID to the service. + new ServiceUUID(UUID.fromString(System.getProperty("test.serviceId"))) + + }; + +} + +net.jini.lookup.JoinManager { + + maxLeaseDuration = bigdata.leaseTimeout; + +} + +/* + * Server configuration options. + */ +com.bigdata.journal.jini.ha.HAJournalServer { + + args = new String[] { + "-showversion", + "-Djava.security.policy=policy.all", + "-Dlog4j.configuration=file:log4j-D.properties", + "-Djava.util.logging.config.file=logging-D.properties", + "-server", + "-Xmx1G", + "-ea", + "-Xdebug","-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1053" + }; + + serviceDir = bigdata.serviceDir; + + // Default policy. + restorePolicy = new com.bigdata.journal.jini.ha.DefaultRestorePolicy(); + + // Suppress automatic snapshots. + snapshotPolicy = new com.bigdata.journal.jini.ha.NoSnapshotPolicy(); + + logicalServiceId = bigdata.logicalServiceId; + + writePipelineAddr = new InetSocketAddress("localhost",bigdata.haPort); + + /* + writePipelineAddr = new InetSocketAddress(// + InetAddress.getByName(// + NicUtil.getIpAddress("default.nic", "default", + false// loopbackOk + )), // + bigdata.haPort + ); + */ + + replicationFactor = bigdata.replicationFactor; + + // Use the overridden version of the HAJournal by default so we get the + // HAGlueTest API for every test. + HAJournalClass = "com.bigdata.journal.jini.ha.HAJournalTest"; + +} + +/* + * Journal configuration. 
+ */ +com.bigdata.journal.jini.ha.HAJournal { + + properties = (NV[]) ConfigMath.concat(new NV[] { + + new NV(Options.FILE, + ConfigMath.getAbsolutePath(new File(bigdata.dataDir,"bigdata-ha.jnl"))), + + new NV(Options.BUFFER_MODE,""+BufferMode.DiskRW), + + new NV(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY,"4000"), + + new NV(IndexMetadata.Options.BTREE_BRANCHING_FACTOR,"128"), + + new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), + + }, bigdata.kb); + +} Added: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config (rev 0) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config 2014-04-02 13:12:56 UTC (rev 8026) @@ -0,0 +1,288 @@ +import net.jini.jeri.BasicILFactory; +import net.jini.jeri.BasicJeriExporter; +import net.jini.jeri.tcp.TcpServerEndpoint; + +import net.jini.discovery.LookupDiscovery; +import net.jini.core.discovery.LookupLocator; +import net.jini.core.entry.Entry; +import net.jini.lookup.entry.Name; +import net.jini.lookup.entry.Comment; +import net.jini.lookup.entry.Address; +import net.jini.lookup.entry.Location; +import net.jini.lookup.entry.ServiceInfo; +import net.jini.core.lookup.ServiceTemplate; + +import java.io.File; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.UUID; + +import com.bigdata.util.NV; +import com.bigdata.util.config.NicUtil; +import com.bigdata.journal.Options; +import com.bigdata.journal.BufferMode; +import com.bigdata.journal.jini.ha.HAJournal; +import com.bigdata.jini.lookup.entry.*; +import com.bigdata.service.IBigdataClient; +import com.bigdata.service.AbstractTransactionService; +import com.bigdata.service.jini.*; +import com.bigdata.service.jini.lookup.DataServiceFilter; +import com.bigdata.service.jini.master.ServicesTemplate; +import com.bigdata.jini.start.config.*; +import com.bigdata.jini.util.ConfigMath; + +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Id; + +// imports for various options. +import com.bigdata.btree.IndexMetadata; +import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.spo.SPORelation; +import com.bigdata.rdf.spo.SPOKeyOrder; +import com.bigdata.rdf.lexicon.LexiconRelation; +import com.bigdata.rdf.lexicon.LexiconKeyOrder; +import com.bigdata.rawstore.Bytes; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeUnit.*; + +/* + * This is a sample configuration file for a highly available Journal. A + * version of this file must be available to each HAJournalServer in the + * pipeline. + */ + +/* + * Globals. + */ +bigdata { + + private static fedname = "benchmark"; + + // NanoSparqlServer (http) port. + private static nssPort = ConfigMath.add(8090,4); + + // write replication pipeline port (listener). + private static haPort = ConfigMath.add(9090,4); + + // The #of services in the write pipeline. + private static replicationFactor = 5; + + // The logical service identifier shared by all members of the quorum. + private static logicalServiceId = System.getProperty("test.logicalServiceId","CI-HAJournal-1"); + + // The service directory. + // Note: Overridden by environment property when deployed. 
+ private static serviceDir = new File(System.getProperty("test.serviceDir",ConfigMath.getAbsolutePath(new File(new File(fedname,logicalServiceId),"E")))); + //new File(new File(fedname,logicalServiceId),"E"); + + // journal data directory. + private static dataDir = serviceDir; + + // one federation, multicast discovery. + //static private groups = LookupDiscovery.ALL_GROUPS; + + // unicast discovery or multiple setups, MUST specify groups. + static private groups = new String[]{bigdata.fedname}; + + /** + * One or more unicast URIs of the form <code>jini://host/</code> + * or <code>jini://host:port/</code> (no default). + * + * This MAY be an empty array if you want to use multicast + * discovery <strong>and</strong> you have specified the groups as + * LookupDiscovery.ALL_GROUPS (a <code>null</code>). + */ + static private locators = new LookupLocator[] { + + // runs jini on the localhost using unicast locators. + new LookupLocator("jini://localhost/") + + }; + + /** + * A common point to set the Zookeeper client's requested + * sessionTimeout and the jini lease timeout. The default lease + * renewal period for jini is 5 minutes while for zookeeper it is + * more like 5 seconds. This puts the two systems onto a similar + * timeout period so that a disconnected client is more likely to + * be noticed in roughly the same period of time for either + * system. A value larger than the zookeeper default helps to + * prevent client disconnects under sustained heavy load. + * + * If you use a short lease timeout (LT 20s), then you need to override + * properties properties for the net.jini.lease.LeaseRenewalManager + * or it will run in a tight loop (it's default roundTripTime is 10s + * and it schedules lease renewals proactively.) + */ + + // jini + static private leaseTimeout = ConfigMath.s2ms(20); + + // zookeeper + static private sessionTimeout = (int)ConfigMath.s2ms(20); + + /* + * Configuration for default KB. + */ + + private static namespace = "kb"; + + private static kb = new NV[] { + + /* Setup for QUADS mode without the full text index. */ + + new NV(BigdataSail.Options.TRUTH_MAINTENANCE, "false" ), + new NV(BigdataSail.Options.QUADS, "true"), + new NV(BigdataSail.Options.STATEMENT_IDENTIFIERS, "false"), + new NV(BigdataSail.Options.TEXT_INDEX, "false"), + new NV(BigdataSail.Options.AXIOMS_CLASS,"com.bigdata.rdf.axioms.NoAxioms"), + new NV(BigdataSail.Options.QUERY_TIME_EXPANDER, "false"), + + // Bump up the branching factor for the lexicon indices on the named kb. + // com.bigdata.namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor=400 + new NV(com.bigdata.config.Configuration.getOverrideProperty + ( namespace + "." + LexiconRelation.NAME_LEXICON_RELATION, + IndexMetadata.Options.BTREE_BRANCHING_FACTOR + ), "400"), + + // Bump up the branching factor for the statement indices on the named kb. + // com.bigdata.namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor=1024 + new NV(com.bigdata.config.Configuration.getOverrideProperty + ( namespace + "." + SPORelation.NAME_SPO_RELATION, + IndexMetadata.Options.BTREE_BRANCHING_FACTOR + ), "1024"), + }; + +} + +/* + * Zookeeper client configuration. + */ +org.apache.zookeeper.ZooKeeper { + + /* Root znode for the federation instance. */ + zroot = "/" + bigdata.fedname; + + /* A comma separated list of host:port pairs, where the port is + * the CLIENT port for the zookeeper server instance. + */ + // standalone. + servers = "localhost:2081"; + + /* Session timeout (optional). 
*/ + sessionTimeout = bigdata.sessionTimeout; + + /* + * ACL for the zookeeper nodes created by the bigdata federation. + * + * Note: zookeeper ACLs are not transmitted over secure channels + * and are placed into plain text Configuration files by the + * ServicesManagerServer. + */ + acl = new ACL[] { + + new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone")) + + }; +} + +/* + * You should not have to edit below this line. + */ + +/* + * Jini client configuration. + */ +com.bigdata.service.jini.JiniClient { + + groups = bigdata.groups; + + locators = bigdata.locators; + + entries = new Entry[] { + + // Optional metadata entries. + new Name("E"), + + // Note: Used to assign the ServiceID to the service. + new ServiceUUID(UUID.fromString(System.getProperty("test.serviceId"))) + + }; + +} + +net.jini.lookup.JoinManager { + + maxLeaseDuration = bigdata.leaseTimeout; + +} + +/* + * Server configuration options. + */ +com.bigdata.journal.jini.ha.HAJournalServer { + + args = new String[] { + "-showversion", + "-Djava.security.policy=policy.all", + "-Dlog4j.configuration=file:log4j-E.properties", + "-Djava.util.logging.config.file=logging-E.properties", + "-server", + "-Xmx1G", + "-ea", + "-Xdebug","-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1054" + }; + + serviceDir = bigdata.serviceDir; + + // Default policy. + restorePolicy = new com.bigdata.journal.jini.ha.DefaultRestorePolicy(); + + // Suppress automatic snapshots. + snapshotPolicy = new com.bigdata.journal.jini.ha.NoSnapshotPolicy(); + + logicalServiceId = bigdata.logicalServiceId; + + writePipelineAddr = new InetSocketAddress("localhost",bigdata.haPort); + + /* + writePipelineAddr = new InetSocketAddress(// + InetAddress.getByName(// + NicUtil.getIpAddress("default.nic", "default", + false// loopbackOk + )), // + bigdata.haPort + ); + */ + + replicationFactor = bigdata.replicationFactor; + + // Use the overridden version of the HAJournal by default so we get the + // HAGlueTest API for every test. + HAJournalClass = "com.bigdata.journal.jini.ha.HAJournalTest"; + +} + +/* + * Journal configuration. 
+ */ +com.bigdata.journal.jini.ha.HAJournal { + + properties = (NV[]) ConfigMath.concat(new NV[] { + + new NV(Options.FILE, + ConfigMath.getAbsolutePath(new File(bigdata.dataDir,"bigdata-ha.jnl"))), + + new NV(Options.BUFFER_MODE,""+BufferMode.DiskRW), + + new NV(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY,"4000"), + + new NV(IndexMetadata.Options.BTREE_BRANCHING_FACTOR,"128"), + + new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), + + }, bigdata.kb); + +} Added: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java (rev 0) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-04-02 13:12:56 UTC (rev 8026) @@ -0,0 +1,114 @@ +package com.bigdata.journal.jini.ha; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import com.bigdata.ha.HAGlue; +import com.bigdata.ha.HAStatusEnum; + +import net.jini.config.Configuration; + + +public class TestHA1JournalServer extends AbstractHA3JournalServerTestCase { + + /** + * {@inheritDoc} + * <p> + * Note: This overrides some {@link Configuration} values for the + * {@link HAJournalServer} in order to establish conditions suitable for + * testing the {@link ISnapshotPolicy} and {@link IRestorePolicy}. + */ + @Override + protected String[] getOverrides() { + + return new String[]{ +// "com.bigdata.journal.HAJournal.properties=" +TestHA3JournalServer.getTestHAJournalProperties(com.bigdata.journal.HAJournal.properties), + "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", + "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", +// "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"", + "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=1", + }; + + } + + protected String getZKConfigFile() { + return "zkClient1.config"; // 1 stage pipeline + } + + public TestHA1JournalServer() { + } + + public TestHA1JournalServer(String name) { + super(name); + } + + public void testStartA() throws Exception { + doStartA(); + } + + protected void doStartA() throws Exception { + + try { + quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); + + fail("HA1 requires quorum of 1!"); + } catch (TimeoutException te) { + // expected + } + + // Start 1 service. + final HAGlue serverA = startA(); + + // this should succeed + final long token = quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); + + assertEquals(token, awaitFullyMetQuorum()); + + final HAGlue leader = quorum.getClient().getLeader(token); + + assertEquals(serverA, leader); + } + + public void testSimpleTransaction() throws Exception { + doStartA(); + + serverA.awaitHAReady(2, TimeUnit.SECONDS); + + /* + * Awaiting HAReady is not sufficient since the service may still + * writing the initial transaction. + * + * So it seems that the problem is not so much with HA1 as rather the + * status of a new journal being ready too soon to process an NSS + * request + */ + + awaitCommitCounter(1, new HAGlue[] { serverA}); + + // Thread.sleep(100); + + // serverA. 
+ + log.warn("Calling SimpleTransaction"); + simpleTransaction(); + + awaitCommitCounter(2, new HAGlue[] { serverA}); + } + + public void testMultiTransaction() throws Exception { + doStartA(); + + awaitCommitCounter(1, new HAGlue[] { serverA}); + // Thread.sleep(1000); + + final int NTRANS = 10; + for (int t = 0; t < NTRANS; t++) { + simpleTransaction(); + } + + awaitCommitCounter(NTRANS+1, new HAGlue[] { serverA}); +} +} Added: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java (rev 0) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-02 13:12:56 UTC (rev 8026) @@ -0,0 +1,530 @@ +package com.bigdata.journal.jini.ha; + +import java.util.UUID; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import com.bigdata.ha.HAGlue; +import com.bigdata.ha.HAStatusEnum; +import com.bigdata.ha.msg.HARootBlockRequest; +import com.bigdata.ha.msg.HASnapshotRequest; +import com.bigdata.ha.msg.IHASnapshotResponse; +import com.bigdata.journal.IRootBlockView; +import com.bigdata.journal.Journal; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.LargeLoadTask; +import com.bigdata.rdf.sail.webapp.client.RemoteRepository; + +import net.jini.config.Configuration; + +public class TestHA1SnapshotPolicy extends AbstractHA3BackupTestCase { + + public TestHA1SnapshotPolicy() { + } + + public TestHA1SnapshotPolicy(String name) { + super(name); + } + + protected String getZKConfigFile() { + return "zkClient1.config"; // 1 stage pipeline + } + + /** + * {@inheritDoc} + * <p> + * Note: This overrides some {@link Configuration} values for the + * {@link HAJournalServer} in order to establish conditions suitable for + * testing the {@link ISnapshotPolicy} and {@link IRestorePolicy}. + */ + @Override + protected String[] getOverrides() { + + /* + * We need to set the time at which the DefaultSnapshotPolicy runs to + * some point in the Future in order to avoid test failures due to + * violated assumptions when the policy runs up self-triggering (based + * on the specified run time) during a CI run. + */ + final String neverRun = getNeverRunSnapshotTime(); + + /* + * For HA1, must have onlineDisasterRecovery to ensure logs are maintained + */ + return new String[]{ + "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", + "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.DefaultSnapshotPolicy("+neverRun+",0)", + // "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", + "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=1", + }; + + } + + /** + * Start a service. The quorum meets. Take a snapshot. Verify that the + * snapshot appears within a reasonable period of time and that it is for + * <code>commitCounter:=1</code> (just the KB create). Verify that the + * digest of the snapshot agrees with the digest of the journal. + */ + public void testA_snapshot() throws Exception { + + // Start 1 service. 
+ final HAGlue serverA = startA(); + + // Wait for a quorum meet. + final long token = quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA); + + final HAGlue leader = quorum.getClient().getLeader(token); + assertEquals(serverA, leader); // A is the leader. + { + + // Verify quorum is still valid. + quorum.assertQuorum(token); + + // Verify quorum is at the expected commit point. + assertEquals( + 1L, + leader.getRootBlock( + new HARootBlockRequest(null/* storeUUID */)) + .getRootBlock().getCommitCounter()); + + // Snapshot directory is empty. + assertEquals(0, recursiveCount(getSnapshotDirA(),SnapshotManager.SNAPSHOT_FILTER)); + + final Future<IHASnapshotResponse> ft = leader + .takeSnapshot(new HASnapshotRequest(0/* percentLogSize */)); + + // wait for the snapshot. + try { + ft.get(5, TimeUnit.SECONDS); + } catch (TimeoutException ex) { + ft.cancel(true/* mayInterruptIfRunning */); + throw ex; + } + + final IRootBlockView snapshotRB = ft.get().getRootBlock(); + + final long commitCounter = 1L; + + // Verify snapshot is for the expected commit point. + assertEquals(commitCounter, snapshotRB.getCommitCounter()); + + // Snapshot directory contains the desired filename. + assertExpectedSnapshots(getSnapshotDirA(), + new long[] { commitCounter }); + + // Verify digest of snapshot agrees with digest of journal. + assertSnapshotDigestEquals(leader, commitCounter); + + } + + } + /** + * Start service. The quorum meets. Take a snapshot. Verify that the + * snapshot appears within a resonable period of time and that it is for + * <code>commitCounter:=1</code> (just the KB create). Request a second + * snapshot for the same commit point and verify that a <code>null</code> is + * returned since we already have a snapshot for that commit point. + */ + public void testA_snapshot_await_snapshot_null() throws Exception { + + // Start 2 services. + final HAGlue serverA = startA(); + + // Wait for a quorum meet. + final long token = quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA); + + final HAGlue leader = quorum.getClient().getLeader(token); + + { + + // Verify quorum is still valid. + quorum.assertQuorum(token); + + // Verify quorum is at the expected commit point. + assertEquals( + 1L, + leader.getRootBlock( + new HARootBlockRequest(null/* storeUUID */)) + .getRootBlock().getCommitCounter()); + + // Snapshot directory is empty. + assertEquals(0, recursiveCount(getSnapshotDirA(),SnapshotManager.SNAPSHOT_FILTER)); + + final Future<IHASnapshotResponse> ft = leader + .takeSnapshot(new HASnapshotRequest(0/* percentLogSize */)); + + // wait for the snapshot. + try { + ft.get(5, TimeUnit.SECONDS); + } catch (TimeoutException ex) { + ft.cancel(true/* mayInterruptIfRunning */); + throw ex; + } + + final IRootBlockView snapshotRB = ft.get().getRootBlock(); + + final long commitCounter = 1L; + + // Verify snapshot is for the expected commit point. + assertEquals(commitCounter, snapshotRB.getCommitCounter()); + + // Snapshot directory contains the expected snapshot(s). + assertExpectedSnapshots(getSnapshotDirA(), + new long[] { commitCounter }); + + // Verify digest of snapshot agrees with digest of journal. + assertSnapshotDigestEquals(leader, commitCounter); + + } + + /* + * Verify 2nd request returns null since snapshot exists for that + * commit point. + */ + { + + // Verify quorum is still at the expected commit point. 
+ assertEquals( + 1L, + leader.getRootBlock( + new HARootBlockRequest(null/* storeUUID */)) + .getRootBlock().getCommitCounter()); + + // request another snapshot. + final Future<IHASnapshotResponse> ft = leader + .takeSnapshot(new HASnapshotRequest(0/* percentLogSize */)); + + if (ft != null) { + + ft.cancel(true/* mayInteruptIfRunning */); + + fail("Expecting null since snapshot exists for current commit point."); + + } + + } + + } + + /** + * Test ability to request a snapshot using an HTTP GET + * <code>.../status?snapshot</code>. + * + * TODO Variant where the percentLogSize parameter is also expressed and + * verify that the semantics of that argument are obeyed. Use this to verify + * that the server will not take snapshot if size on disk of HALog files + * since the last snapshot is LT some percentage. + */ + public void testA_snapshot_HTTP_GET() throws Exception { + + // Start 2 services. + final HAGlue serverA = startA(); + + // Wait for a quorum meet. + final long token = quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA); + + final HAGlue leader = quorum.getClient().getLeader(token); + + { + + // Verify quorum is still valid. + quorum.assertQuorum(token); + + // Verify quorum is at the expected commit point. + assertEquals( + 1L, + leader.getRootBlock( + new HARootBlockRequest(null/* storeUUID */)) + .getRootBlock().getCommitCounter()); + + // Snapshot directory is empty. + assertEquals(0, recursiveCount(getSnapshotDirA(),SnapshotManager.SNAPSHOT_FILTER)); + + doSnapshotRequest(leader); + + /* + * Get the Future. Should still be there, but if not then will be + * null (it which case the snapshot is already done). + */ + f... [truncated message content] |
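The test refactoring in r8026 turns on a single template-method hook: the HA3 base test case now resolves its zookeeper client configuration through the overridable getZKConfigFile(), so the HA1 and HA5 suites can select a pipeline of the right size without duplicating any connection logic. A minimal sketch of that pattern follows; getZKConfigFile(), SRC_PATH, ConfigurationProvider, and the zkClient*.config file names are taken from the diff above, while the class names, the SRC_PATH value, and the getZKConfig() helper are invented for illustration.

import net.jini.config.Configuration;
import net.jini.config.ConfigurationException;
import net.jini.config.ConfigurationProvider;

abstract class BasePipelineTestCase {

    // Assumed location of the test config files (hypothetical value).
    static final String SRC_PATH =
            "bigdata-jini/src/test/com/bigdata/journal/jini/ha/";

    // Default: the 3-stage pipeline used by the existing HA3 tests.
    protected String getZKConfigFile() {
        return "zkClient.config";
    }

    // The base class composes the path exactly once; subclasses only
    // supply a file name.
    protected Configuration getZKConfig() throws ConfigurationException {
        return ConfigurationProvider.getInstance(
                new String[] { SRC_PATH + getZKConfigFile() });
    }
}

class HA1PipelineTestCase extends BasePipelineTestCase {
    @Override
    protected String getZKConfigFile() {
        return "zkClient1.config"; // 1-stage pipeline
    }
}

class HA5PipelineTestCase extends BasePipelineTestCase {
    @Override
    protected String getZKConfigFile() {
        return "zkClient5.config"; // 5-stage pipeline
    }
}

The rest of the HA5 setup follows the same override-one-hook approach: D_JETTY_PORT and E_JETTY_PORT are derived from C_JETTY_PORT, and HAJournal-D.config/HAJournal-E.config differ from the A-C configurations only in the port offsets, the service Name entry, the log4j/logging file names, and replicationFactor = 5.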
From: <mar...@us...> - 2014-04-03 09:48:22
Revision: 8032 http://sourceforge.net/p/bigdata/code/8032 Author: martyncutcher Date: 2014-04-03 09:48:19 +0000 (Thu, 03 Apr 2014) Log Message: ----------- Commit to allow branch to be added to CI. Note that this includes a delay added to AbstractJournal.gatherPhase() to support HA1 and which must be removed once the cause of its necessity is identified. Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -310,7 +310,7 @@ /** * When a record is used as a read cache then the readCount is - * maintained as a metric on its access. �This could be used to + * maintained as a metric on its access. ???This could be used to * determine eviction/compaction. * <p> * Note: volatile to guarantee visibility of updates. Might do better @@ -509,7 +509,8 @@ * @param isHighlyAvailable * when <code>true</code> the whole record checksum is maintained * for use when replicating the write cache along the write - * pipeline. + * pipeline. This needs to be <code>true</code> for HA1 as well + * since we need to write the HALog. * @param bufferHasData * when <code>true</code> the caller asserts that the buffer has * data (from a replicated write), in which case the position Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -1151,6 +1151,7 @@ done = WriteCache.transferTo(cache/* src */, curCompactingCache/* dst */, serviceMap, 0/*threshold*/); if (done) { + // Everything was compacted. Send just the address metadata (empty cache block). sendAddressMetadata(cache); if (log.isDebugEnabled()) @@ -1164,7 +1165,7 @@ */ if (flush) { /* - * Send out the full cache block. + * Send out the full cache block. FIXME Why are we not calling sendAddressMetadata() here? 
*/ writeCacheBlock(curCompactingCache); addClean(curCompactingCache, true/* addFirst */); @@ -1231,7 +1232,7 @@ * been allocated on the leader in the same order in which the leader * made those allocations. This information is used to infer the order * in which the allocators for the different allocation slot sizes are - * created. This method will synchronous send those address notices and + * created. This method will synchronously send those address notices and * and also makes sure that the followers see the recycled addresses * records so they can keep both their allocators and the actual * allocations synchronized with the leader. @@ -1249,8 +1250,9 @@ throws IllegalStateException, InterruptedException, ExecutionException, IOException { - if (quorum == null || !quorum.isHighlyAvailable() - || !quorum.getClient().isLeader(quorumToken)) { +// if (quorum == null || !quorum.isHighlyAvailable() +// || !quorum.getClient().isLeader(quorumToken)) { + if (quorum == null) { return; } @@ -1354,7 +1356,7 @@ * unit tests need to be updated to specify [isHighlyAvailable] for * ALL quorum based test runs. */ - final boolean isHA = quorum != null && quorum.isHighlyAvailable(); + final boolean isHA = quorum != null; // IFF HA and this is the quorum leader. final boolean isHALeader = isHA @@ -1441,10 +1443,12 @@ quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate()); // ASYNC MSG RMI + NIO XFER. - remoteWriteFuture = quorumMember.replicate(null/* req */, pkg.getMessage(), - pkg.getData().duplicate()); - - counters.get().nsend++; + if (quorum.replicationFactor() > 1) { + remoteWriteFuture = quorumMember.replicate(null/* req */, pkg.getMessage(), + pkg.getData().duplicate()); + + counters.get().nsend++; + } /* * The quorum leader logs the write cache block here. For the Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -2473,18 +2473,18 @@ } - /** - * Return <code>true</code> if the journal is configured for high - * availability. - * - * @see QuorumManager#isHighlyAvailable() - */ - public boolean isHighlyAvailable() { +// /** +// * Return <code>true</code> if the journal is configured for high +// * availability. +// * +// * @see Quorum#isHighlyAvailable() +// */ +// public boolean isHighlyAvailable() { +// +// return quorum == null ? false : quorum.isHighlyAvailable(); +// +// } - return quorum == null ? false : quorum.isHighlyAvailable(); - - } - /** * {@inheritDoc} * <p> @@ -3428,8 +3428,16 @@ if (quorum == null) return; -// if (!quorum.isHighlyAvailable()) -// return; + if (!quorum.isHighlyAvailable()) { + // FIXME: Find the reason why this delay is needed and remove it! + // + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + return; + } /** * CRITICAL SECTION. We need obtain a distributed consensus for the @@ -3542,6 +3550,19 @@ // reload the commit record from the new root block. store._commitRecord = store._getCommitRecord(); + if (quorum != null) { + /* + * Write the root block on the HALog file, closing out that + * file. 
+ */ + final QuorumService<HAGlue> localService = quorum.getClient(); + try { + localService.logRootBlock(newRootBlock); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + if (txLog.isInfoEnabled()) txLog.info("COMMIT: commitTime=" + commitTime); @@ -3792,7 +3813,7 @@ if (log.isInfoEnabled()) log.info("commitTime=" + commitTime); - final CommitState cs = new CommitState(this, commitTime); + final CommitState cs = new CommitState(this, commitTime); /* * Flush application data, decide whether or not the store is dirty, @@ -3808,6 +3829,7 @@ } // Do GATHER (iff HA). + cs.gatherPhase(); /* @@ -3846,12 +3868,12 @@ // Prepare the new root block. cs.newRootBlock(); - if (quorum == null) { + if (quorum == null || quorum.replicationFactor() == 1) { // Non-HA mode. cs.commitSimple(); - } else { + } else { // HA mode commit (2-phase commit). cs.commitHA(); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -146,6 +146,7 @@ } + @Override public ByteBuffer read(final long addr) { try { Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/WORMStrategy.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -272,7 +272,7 @@ * which use this flag to conditionally track the checksum of the entire * write cache buffer). */ - private final boolean isHighlyAvailable; + private final boolean isQuorumUsed; /** * The {@link UUID} which identifies the journal (this is the same for each @@ -970,11 +970,11 @@ com.bigdata.journal.Options.HALOG_COMPRESSOR, com.bigdata.journal.Options.DEFAULT_HALOG_COMPRESSOR); - isHighlyAvailable = quorum != null && quorum.isHighlyAvailable(); + isQuorumUsed = quorum != null; // && quorum.isHighlyAvailable(); final boolean useWriteCacheService = fileMetadata.writeCacheEnabled && !fileMetadata.readOnly && fileMetadata.closeTime == 0L - || isHighlyAvailable; + || isQuorumUsed; if (useWriteCacheService) { /* @@ -1049,7 +1049,7 @@ final long fileExtent) throws InterruptedException { - super(baseOffset, buf, useChecksum, isHighlyAvailable, + super(baseOffset, buf, useChecksum, isQuorumUsed, bufferHasData, opener, fileExtent); } @@ -1379,6 +1379,7 @@ * to get the data from another node based on past experience for that * record. 
*/ + @Override public ByteBuffer read(final long addr) { try { Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -688,7 +688,7 @@ throws InterruptedException { super(buf, useChecksum, m_quorum != null - && m_quorum.isHighlyAvailable(), bufferHasData, opener, + /*&& m_quorum.isHighlyAvailable()*/, bufferHasData, opener, fileExtent, m_bufferedWrite); @@ -1083,7 +1083,7 @@ final boolean highlyAvailable = m_quorum != null && m_quorum.isHighlyAvailable(); - final boolean prefixWrites = highlyAvailable; + final boolean prefixWrites = m_quorum != null; // highlyAvailable return new RWWriteCacheService(m_writeCacheBufferCount, m_minCleanListSize, m_readCacheBufferCount, prefixWrites, m_compactionThreshold, m_hotCacheSize, m_hotCacheThreshold, Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -80,8 +80,9 @@ final long fileExtent) throws InterruptedException { - final boolean highlyAvailable = getQuorum() != null - && getQuorum().isHighlyAvailable(); +// final boolean highlyAvailable = getQuorum() != null +// && getQuorum().isHighlyAvailable(); + final boolean highlyAvailable = getQuorum() != null; return new FileChannelScatteredWriteCache(buf, true/* useChecksum */, highlyAvailable, Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -184,7 +184,7 @@ // Verify journal can be dumped without error. dumpJournal(jnl); - + /* * Now roll that journal forward using the HALog directory. 
*/ Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -56,7 +56,7 @@ "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.DefaultSnapshotPolicy("+neverRun+",0)", // "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", - "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", + // "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=1", }; Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -36,8 +36,11 @@ import org.openrdf.model.impl.URIImpl; import org.openrdf.model.vocabulary.RDF; +import com.bigdata.ha.HAGlue; +import com.bigdata.ha.QuorumService; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.IIndexManager; +import com.bigdata.quorum.Quorum; import com.bigdata.rdf.axioms.Axioms; import com.bigdata.rdf.axioms.NoAxioms; import com.bigdata.rdf.axioms.OwlAxioms; @@ -641,7 +644,10 @@ final AbstractJournal jnl = (AbstractJournal) indexManager; - if (jnl.isHighlyAvailable()) { + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = jnl + .getQuorum(); + + if (quorum != null && quorum.isHighlyAvailable()) { g.add(aService, SD.feature, HighlyAvailable); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-04-03 00:41:27 UTC (rev 8031) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-04-03 09:48:19 UTC (rev 8032) @@ -55,10 +55,13 @@ import com.bigdata.bop.engine.QueryLog; import com.bigdata.bop.fed.QueryEngineFactory; import com.bigdata.counters.CounterSet; +import com.bigdata.ha.HAGlue; +import com.bigdata.ha.QuorumService; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.DumpJournal; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.Journal; +import com.bigdata.quorum.Quorum; import com.bigdata.rdf.sail.sparql.ast.SimpleNode; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery; @@ -497,13 +500,18 @@ // final boolean showQuorum = req.getParameter(SHOW_QUORUM) != null; - if (getIndexManager() instanceof AbstractJournal - && ((AbstractJournal) getIndexManager()) - .isHighlyAvailable()) { + if (getIndexManager() instanceof AbstractJournal) { - new 
HAStatusServletUtil(getIndexManager()). - doGet(req, resp, current); + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = ((AbstractJournal) getIndexManager()) + .getQuorum(); + if (quorum != null && quorum.isHighlyAvailable()) { + + new HAStatusServletUtil(getIndexManager()).doGet(req, resp, + current); + + } + } current.node("br", "Accepted query count="
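
The common thread in r8032 is that guards written as quorum.isHighlyAvailable(), which is true only when the replication factor k is greater than one, are relaxed so that an HA1 service (k = 1) still writes HALogs and runs the quorum commit machinery, while actual replication stays conditional on having at least one follower. The sketch below separates the two conditions; it is an illustration, not bigdata source, and the helper names isQuorumUsed and needsReplication are invented for clarity:

    // Stand-in for com.bigdata.quorum.Quorum, reduced to what the
    // distinction depends on.
    interface Quorum {
        int replicationFactor();     // target replication factor (k)
        boolean isHighlyAvailable(); // k > 1 in the real API
    }

    final class HAGuards {

        // HALogs and whole-record checksums are needed whenever a quorum
        // exists at all, even for HA1, because the HALog also backs online
        // backup and point-in-time restore.
        static boolean isQuorumUsed(final Quorum quorum) {
            return quorum != null;
        }

        // Replicating write-cache blocks down the pipeline only makes
        // sense when there is at least one follower, i.e. k > 1.
        static boolean needsReplication(final Quorum quorum) {
            return quorum != null && quorum.replicationFactor() > 1;
        }
    }

This is why WriteCacheService above logs the write cache block unconditionally but now wraps the quorumMember.replicate() call in a replicationFactor() > 1 test.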
From: <tho...@us...> - 2014-04-06 15:09:20
Revision: 8061 http://sourceforge.net/p/bigdata/code/8061 Author: thompsonbry Date: 2014-04-06 15:09:14 +0000 (Sun, 06 Apr 2014) Log Message: ----------- Reconciled my version and Martyns with respect to the HA1/HA5 edits. See #721 (HA1) See #722 (HA5) Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-05 15:59:33 UTC (rev 8060) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-06 15:09:14 UTC (rev 8061) @@ -3555,13 +3555,17 @@ * file. */ final QuorumService<HAGlue> localService = quorum.getClient(); - try { - localService.logRootBlock(newRootBlock); - } catch (IOException e) { - throw new RuntimeException(e); + if (localService != null) { + // Quorum service not asynchronously closed. + try { + // Write the closing root block on the HALog file. + localService.logRootBlock(newRootBlock); + } catch (IOException e) { + throw new RuntimeException(e); + } } } - + if (txLog.isInfoEnabled()) txLog.info("COMMIT: commitTime=" + commitTime); @@ -3812,7 +3816,7 @@ if (log.isInfoEnabled()) log.info("commitTime=" + commitTime); - final CommitState cs = new CommitState(this, commitTime); + final CommitState cs = new CommitState(this, commitTime); /* * Flush application data, decide whether or not the store is dirty, @@ -3828,7 +3832,6 @@ } // Do GATHER (iff HA). - cs.gatherPhase(); /* @@ -3872,7 +3875,7 @@ // Non-HA mode. cs.commitSimple(); - } else { + } else { // HA mode commit (2-phase commit). 
cs.commitHA(); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-05 15:59:33 UTC (rev 8060) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-06 15:09:14 UTC (rev 8061) @@ -1080,8 +1080,8 @@ private RWWriteCacheService newWriteCacheService() { try { - final boolean highlyAvailable = m_quorum != null - && m_quorum.isHighlyAvailable(); +// final boolean highlyAvailable = m_quorum != null +// && m_quorum.isHighlyAvailable(); final boolean prefixWrites = m_quorum != null; // highlyAvailable @@ -1089,7 +1089,8 @@ m_minCleanListSize, m_readCacheBufferCount, prefixWrites, m_compactionThreshold, m_hotCacheSize, m_hotCacheThreshold, convertAddr(m_fileSize), m_reopener, m_quorum, this) { - + + @Override @SuppressWarnings("unchecked") public WriteCache newWriteCache(final IBufferAccess buf, final boolean useChecksum, Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-04-05 15:59:33 UTC (rev 8060) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-04-06 15:09:14 UTC (rev 8061) @@ -517,10 +517,11 @@ final long commitCounter = SnapshotManager .parseCommitCounterFile(journalFile.getName()); + // TODO Note: Accept version from main development branch when merging versions. // temporary file in the same directory as the snapshot. - final File out = File.createTempFile("" + commitCounter + "-", - Journal.Options.JNL, journalFile.getAbsoluteFile() - .getParentFile()); + final File out = File.createTempFile("HARestore-TMP" + + commitCounter + "-", Journal.Options.JNL, journalFile + .getAbsoluteFile().getParentFile()); System.out.println("Decompressing " + in + " to " + out); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-04-05 15:59:33 UTC (rev 8060) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-04-06 15:09:14 UTC (rev 8061) @@ -164,12 +164,29 @@ + "KB/IsolatableIndices"); /** - * A highly available deployment. + * A highly available deployment - this feature refers to the presence of + * the {@link HAGlue} interface, the capability for online backups, and the + * existence of a targer {@link #ReplicationFactor}. You must consult the + * target {@link #ReplicationFactor} in order to determine whether the + * database is in principle capable of tolerating one or more failures and + * the actual #of running joined instances to determine whether the database + * can withstand a failure. */ static public final URI HighlyAvailable = new URIImpl(BDFNS + "HighlyAvailable"); /** + * The value of this feature is the target replication factor for the + * database expressed as an <code>xsd:int</code>. If this is ONE (1), then + * the database is setup with a quorum and has the capability for online + * backup, but it is not replicated. TWO (2) indicates mirroring, but is not + * highly available. 
THREE (3) is the minimum configuration that can + * withstand a failure. + */ + static public final URI ReplicationFactor = new URIImpl(BDFNS + + "replicationCount"); + + /** * An {@link IBigdataFederation}. */ static public final URI ScaleOut = new URIImpl(BDFNS @@ -647,8 +664,13 @@ final Quorum<HAGlue, QuorumService<HAGlue>> quorum = jnl .getQuorum(); - if (quorum != null && quorum.isHighlyAvailable()) { + if (quorum != null) { + final int k = quorum.replicationFactor(); + + g.add(aService, SD.ReplicationFactor, tripleStore + .getValueFactory().createLiteral(k)); + g.add(aService, SD.feature, HighlyAvailable); } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-04-05 15:59:33 UTC (rev 8060) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-04-06 15:09:14 UTC (rev 8061) @@ -505,7 +505,7 @@ final Quorum<HAGlue, QuorumService<HAGlue>> quorum = ((AbstractJournal) getIndexManager()) .getQuorum(); - if (quorum != null && quorum.isHighlyAvailable()) { + if (quorum != null) {//&& quorum.isHighlyAvailable()) { new HAStatusServletUtil(getIndexManager()).doGet(req, resp, current);
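
With this change the SPARQL service description advertises the target replication factor as an xsd:int whenever a quorum exists, alongside the HighlyAvailable feature. A small sketch of the resulting triples, built with the openrdf API the project already uses; the BDFNS namespace value and the service URI below are placeholders rather than values taken from SD.java:

    import org.openrdf.model.Graph;
    import org.openrdf.model.Statement;
    import org.openrdf.model.URI;
    import org.openrdf.model.ValueFactory;
    import org.openrdf.model.impl.GraphImpl;
    import org.openrdf.model.impl.ValueFactoryImpl;

    public class ServiceDescriptionSketch {
        public static void main(String[] args) {
            final String BDFNS = "http://www.bigdata.com/rdf#"; // assumed namespace
            final ValueFactory vf = ValueFactoryImpl.getInstance();
            final Graph g = new GraphImpl();
            final URI aService = vf.createURI("http://localhost:8090/sparql"); // placeholder
            final URI feature = vf.createURI(BDFNS + "feature");
            final URI highlyAvailable = vf.createURI(BDFNS + "HighlyAvailable");
            final URI replicationFactor = vf.createURI(BDFNS + "replicationCount");

            final int k = 1; // HA1: a quorum exists, so both statements are emitted
            g.add(aService, replicationFactor, vf.createLiteral(k)); // xsd:int literal
            g.add(aService, feature, highlyAvailable);

            for (Statement st : g)
                System.out.println(st);
        }
    }

As the new javadoc stresses, the HighlyAvailable feature by itself no longer implies fault tolerance: a client must consult the advertised replication factor, and the number of services actually joined, to decide whether the database can survive a failure.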
From: <mar...@us...> - 2014-04-09 07:44:35
Revision: 8090 http://sourceforge.net/p/bigdata/code/8090 Author: martyncutcher Date: 2014-04-09 07:44:27 +0000 (Wed, 09 Apr 2014) Log Message: ----------- For ticket #721: fix to BufferedWrite to ensure buffers are zero padded to the slot size when eliding contiguous writes. This caused a potential problem with binary equivalence for snapshots. Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-08 22:24:37 UTC (rev 8089) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-09 07:44:27 UTC (rev 8090) @@ -137,6 +137,9 @@ } + // Used to zero pad slots in buffered writes + final byte[] s_zeros = new byte[256]; + /** * Buffer a write. * @@ -188,6 +191,19 @@ } // copy the caller's record into the buffer. m_data.put(data); + + // if data_len < slot_len then clear remainder of buffer + int padding = slot_len - data_len; + while (padding > 0) { + if (padding > s_zeros.length) { + m_data.put(s_zeros); + padding -= s_zeros.length; + } else { + m_data.put(s_zeros, 0, padding); + break; + } + } + // update the file offset by the size of the allocation slot m_endAddr += slot_len; // update the buffer position by the size of the allocation slot. @@ -250,8 +266,9 @@ final ByteBuffer m_data = tmp.buffer(); // reset the buffer state. 
- m_data.position(0); - m_data.limit(m_data.capacity()); + //m_data.position(0); + //m_data.limit(m_data.capacity()); + m_data.clear(); m_startAddr = -1; m_endAddr = 0; Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2014-04-08 22:24:37 UTC (rev 8089) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2014-04-09 07:44:27 UTC (rev 8090) @@ -839,6 +839,7 @@ m_statsBucket.allocate(size); } + return value; } else { StringBuilder sb = new StringBuilder(); @@ -1300,4 +1301,33 @@ return count; } + /** + * Determines if the provided physical address is within an allocated slot + * @param addr + * @return + */ + public boolean verifyAllocatedAddress(long addr) { + if (log.isTraceEnabled()) + log.trace("Checking Allocator " + m_index + ", size: " + m_size); + + final Iterator<AllocBlock> blocks = m_allocBlocks.iterator(); + final long range = m_size * m_bitSize * 32; + while (blocks.hasNext()) { + final int startAddr = blocks.next().m_addr; + if (startAddr != 0) { + final long start = RWStore.convertAddr(startAddr); + final long end = start + range; + + if (log.isTraceEnabled()) + log.trace("Checking " + addr + " between " + start + " - " + end); + + if (addr >= start && addr < end) + return true; + } else { + break; + } + } + return false; + } + } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-08 22:24:37 UTC (rev 8089) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-09 07:44:27 UTC (rev 8090) @@ -6963,7 +6963,7 @@ if (log.isDebugEnabled()) log.debug("writeRaw: " + offset); - + // Guard IO against concurrent file extension. final Lock lock = m_extensionLock.readLock(); @@ -7068,6 +7068,22 @@ } } + /** + * Can be used to determine if an address is within an allocated slot. 
+ * + * @param addr + * @return whether addr is within slot allocated area + */ + public boolean verifyAllocatedAddress(final long addr) { + for (int index = 0; index < m_allocs.size(); index++) { + final FixedAllocator xfa = m_allocs.get(index); + if (xfa.verifyAllocatedAddress(addr)) + return true; + } + + return false; + } + public StoreState getStoreState() { final RWStoreState ret = new RWStoreState(this); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-08 22:24:37 UTC (rev 8089) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-09 07:44:27 UTC (rev 8090) @@ -6,6 +6,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import net.jini.config.Configuration; + import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAStatusEnum; import com.bigdata.ha.msg.HARootBlockRequest; @@ -13,11 +15,8 @@ import com.bigdata.ha.msg.IHASnapshotResponse; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.Journal; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.LargeLoadTask; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; -import net.jini.config.Configuration; - public class TestHA1SnapshotPolicy extends AbstractHA3BackupTestCase { public TestHA1SnapshotPolicy() { @@ -438,8 +437,8 @@ */ public void testA_snapshot_multipleTx_restore_validate() throws Exception { - final int N1 = 7; // #of transactions to run before the snapshot. - final int N2 = 8; // #of transactions to run after the snapshot. + final int N1 = 7; //7; // #of transactions to run before the snapshot. + final int N2 = 8; //8; // #of transactions to run after the snapshot. // Start service. final HAGlue serverA = startA(); @@ -459,13 +458,13 @@ // Now run N transactions. for (int i = 0; i < N1; i++) { + + simpleTransaction(); - simpleTransaction(); - } + + final long commitCounterN1 = N1 + 1; - final long commitCounterN1 = N1 + 1; - awaitCommitCounter(commitCounterN1, serverA); /* @@ -478,7 +477,7 @@ // Snapshot directory is empty. assertEquals(1, recursiveCount(getSnapshotDirA(),SnapshotManager.SNAPSHOT_FILTER)); - + // request snapshot on A. final Future<IHASnapshotResponse> ft = serverA .takeSnapshot(new HASnapshotRequest(0/* percentLogSize */)); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
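
The BufferedWrite fix works by padding each record out to its allocation slot boundary with zeros when contiguous writes are elided, so the coalesced write is byte-for-byte identical to what separate slot-aligned writes would have produced; without the padding, stale bytes trailing a short record could reach the disk and break binary equivalence of snapshots. A standalone sketch of just the padding loop (SlotPadDemo and putPadded are illustrative names, not bigdata code):

    import java.nio.ByteBuffer;

    public class SlotPadDemo {

        // Reusable block of zeros, mirroring s_zeros in the patch.
        private static final byte[] ZEROS = new byte[256];

        // Write the record, then zero-fill the rest of its slot.
        static void putPadded(final ByteBuffer dst, final byte[] record,
                final int slotLen) {
            dst.put(record);
            int padding = slotLen - record.length;
            while (padding > 0) {
                final int n = Math.min(padding, ZEROS.length);
                dst.put(ZEROS, 0, n);
                padding -= n;
            }
        }

        public static void main(final String[] args) {
            final ByteBuffer buf = ByteBuffer.allocate(128);
            putPadded(buf, new byte[] { 1, 2, 3 }, 64);
            System.out.println(buf.position()); // 64: 3 data bytes plus 61 zeros
        }
    }

The reset change in the same hunk is behavior-preserving: ByteBuffer.clear() sets the position to 0 and the limit to the capacity, which is exactly what the two replaced calls did.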
From: <tho...@us...> - 2014-04-15 13:01:37
Revision: 8121 http://sourceforge.net/p/bigdata/code/8121 Author: thompsonbry Date: 2014-04-15 13:01:24 +0000 (Tue, 15 Apr 2014) Log Message: ----------- Caught up the HA1/HA5 branch with changes in the main development branch prior to bringing back the HA1/HA5 branch to the main development branch. See #722 (HA1) See #723 (HA5) Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/FullTextIndex.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/ReadIndexTask.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/util/CSVReader.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/cache/StressTestGlobalLRU.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithRedirect.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithSplits.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchOptimizer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchServiceFactory.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestUnions.java branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/BIGDATA_MGC_HA1_HA5/build.xml branches/BIGDATA_MGC_HA1_HA5/src/resources/HAJournal/HAJournal.config branches/BIGDATA_MGC_HA1_HA5/src/resources/bin/startHAServices branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/init.d/bigdataHA Added Paths: ----------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile 
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.srx branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.trig 
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888b.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.srx branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.ttl branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.srx branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.ttl branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874b.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/831.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/831.ttl branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/874.ttl branches/BIGDATA_MGC_HA1_HA5/src/resources/bin/HARestore branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/ branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdata/ branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdataHA Removed Paths: ------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ 
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/bigdata/ branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdata/ branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdataHA Property Changed: ---------------- branches/BIGDATA_MGC_HA1_HA5/ branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/bop/util/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/jsr166/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/util/httpd/ branches/BIGDATA_MGC_HA1_HA5/bigdata-compatibility/ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/attr/ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/disco/ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/util/config/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/btc/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/btc/src/resources/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/lubm/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/uniprot/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/uniprot/src/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/samples/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ 
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/LEGAL/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/lib/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/java/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/java/it/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/java/it/unimi/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/test/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/test/it/unimi/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/test/it/unimi/dsi/ branches/BIGDATA_MGC_HA1_HA5/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/BIGDATA_MGC_HA1_HA5/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/BIGDATA_MGC_HA1_HA5/osgi/ branches/BIGDATA_MGC_HA1_HA5/src/resources/bin/config/ Index: branches/BIGDATA_MGC_HA1_HA5 =================================================================== --- branches/BIGDATA_MGC_HA1_HA5 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5 ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0:8025-8120 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 \ No newline at end of property Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:8025-8120 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/MGC_1_3_0/bigdata/lib/jetty:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 
## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:8025-8120 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:8025-8120 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util:8025-8120 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/util:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -84,11 +84,11 @@ */ public String readLine() throws 
IOException, InterruptedException { - final Thread t = Thread.currentThread(); +// final Thread t = Thread.currentThread(); while(getActiveProcess().isAlive()) { - if(t.isInterrupted()) { + if(Thread.interrupted()) { throw new InterruptedException(); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -31,6 +31,7 @@ import java.io.IOException; import java.io.InputStreamReader; import java.io.LineNumberReader; +import java.nio.channels.ClosedByInterruptException; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Arrays; @@ -39,6 +40,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.CancellationException; import org.apache.log4j.Logger; @@ -52,6 +54,7 @@ import com.bigdata.counters.IRequiredHostCounters; import com.bigdata.util.CSVReader; import com.bigdata.util.CSVReader.Header; +import com.bigdata.util.InnerCause; /** * Collects per-host performance counters on a Windows platform using @@ -68,19 +71,19 @@ */ public class TypeperfCollector extends AbstractProcessCollector { - static protected final Logger log = Logger.getLogger(TypeperfCollector.class); + static private final Logger log = Logger.getLogger(TypeperfCollector.class); - /** - * True iff the {@link #log} level is INFO or less. - */ - final protected static boolean INFO = log.isInfoEnabled(); +// /** +// * True iff the {@link #log} level is INFO or less. +// */ +// final protected static boolean INFO = log.isInfoEnabled(); +// +// /** +// * True iff the {@link #log} level is DEBUG or less. +// */ +// final protected static boolean DEBUG = log.isDebugEnabled(); /** - * True iff the {@link #log} level is DEBUG or less. - */ - final protected static boolean DEBUG = log.isDebugEnabled(); - - /** * Updated each time a new row of data is read from the process and reported * as the last modified time for counters based on that process and * defaulted to the time that we begin to collect performance data. @@ -175,6 +178,7 @@ } + @Override public Double getValue() { final Double value = (Double) vals.get(path); @@ -189,6 +193,7 @@ } + @Override public long lastModified() { return lastModified; @@ -199,7 +204,8 @@ * @throws UnsupportedOperationException * always. */ - public void setValue(Double value, long timestamp) { + @Override + public void setValue(final Double value, final long timestamp) { throw new UnsupportedOperationException(); @@ -225,6 +231,7 @@ * * @throws IOException */ + @Override public List<String> getCommand() { // make sure that our counters have been declared. @@ -243,7 +250,7 @@ // counter names need to be double quoted for the command line. 
command.add("\"" + decl.getCounterNameForWindows() + "\""); - if(INFO) log.info("Will collect: \"" + if(log.isInfoEnabled()) log.info("Will collect: \"" + decl.getCounterNameForWindows() + "\" as " + decl.getPath()); @@ -255,6 +262,7 @@ } + @Override public AbstractProcessReader getProcessReader() { return new ProcessReader(); @@ -290,9 +298,10 @@ } + @Override public void run() { - if(INFO) + if(log.isInfoEnabled()) log.info(""); try { @@ -300,27 +309,34 @@ // run read(); - } catch (InterruptedException e) { + } catch (Exception e) { - // Note: This is a normal exit. - if(INFO) - log.info("Interrupted - will terminate"); + if (InnerCause.isInnerCause(e, InterruptedException.class)|| + InnerCause.isInnerCause(e, ClosedByInterruptException.class)|| + InnerCause.isInnerCause(e, CancellationException.class) + ) { - } catch (Exception e) { + // Note: This is a normal exit. + if (log.isInfoEnabled()) + log.info("Interrupted - will terminate"); - // Unexpected error. - log.fatal(e.getMessage(), e); + } else { + // Unexpected error. + log.fatal(e.getMessage(), e); + + } + } - if(INFO) + if(log.isInfoEnabled()) log.info("Terminated"); } private void read() throws Exception { - if(INFO) + if(log.isInfoEnabled()) log.info(""); long nsamples = 0; @@ -345,33 +361,34 @@ */ csvReader.setTailDelayMillis(100/* ms */); - try { +// try { - // read headers from the file. - csvReader.readHeaders(); + // read headers from the file. + csvReader.readHeaders(); - } catch (IOException ex) { +// } catch (IOException ex) { +// +// /* +// * Note: An IOException thrown out here often indicates an +// * asynchronous close of of the reader. A common and benign +// * cause of that is closing the input stream because the service +// * is shutting down. +// */ +// +// if (!Thread.interrupted()) +// throw ex; +// +// throw new InterruptedException(); +// +// } - /* - * Note: An IOException thrown out here often indicates an - * asynchronous close of of the reader. A common and benign - * cause of that is closing the input stream because the service - * is shutting down. - */ - - if (!Thread.currentThread().isInterrupted()) - throw ex; - - throw new InterruptedException(); - - } - /* * replace the first header definition so that we get clean * timestamps. */ csvReader.setHeader(0, new Header("Timestamp") { - public Object parseValue(String text) { + @Override + public Object parseValue(final String text) { try { return f.parse(text); @@ -390,7 +407,7 @@ */ { - if(INFO) + if(log.isInfoEnabled()) log.info("setting up headers."); int i = 1; @@ -400,7 +417,7 @@ final String path = decl.getPath(); // String path = hostPathPrefix + decl.getPath(); - if (INFO) + if (log.isInfoEnabled()) log.info("setHeader[i=" + i + "]=" + path); csvReader.setHeader(i++, new Header(path)); @@ -409,13 +426,20 @@ } - if(INFO) + if(log.isInfoEnabled()) log.info("starting row reads"); - final Thread t = Thread.currentThread(); +// final Thread t = Thread.currentThread(); - while (!t.isInterrupted() && csvReader.hasNext()) { + while (true) { + if (Thread.interrupted()) + throw new InterruptedException(); + + if (!csvReader.hasNext()) { + break; + } + try { final Map<String, Object> row = csvReader.next(); @@ -455,7 +479,7 @@ } - if(INFO) + if(log.isInfoEnabled()) log.info("done."); } @@ -466,6 +490,7 @@ * Declares the performance counters to be collected from the Windows * platform. 
*/ + @Override public CounterSet getCounters() { // if (root == null) { Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba:8025-8120 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/htree/raba:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166 =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166:8025-8120 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/jsr166:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -2848,7 +2848,7 @@ /** * Note: This task is interrupted by {@link OverflowManager#shutdownNow()}. - * Therefore is tests {@link Thread#isInterrupted()} and returns immediately + * Therefore it tests {@link Thread#isInterrupted()} and returns immediately * if it has been interrupted. * * @return The return value is always null. @@ -3374,7 +3374,10 @@ static protected boolean isNormalShutdown( final ResourceManager resourceManager, final Throwable t) { - if(Thread.currentThread().isInterrupted()) return true; + if (Thread.interrupted()) { + // Note: interrupt status of thread was cleared. 
+ return true; + } if (!resourceManager.isRunning() || !resourceManager.getConcurrencyManager() Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/FullTextIndex.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -955,35 +955,137 @@ } + /** + * Perform a range count on a full text query. + */ public int count(final FullTextQuery query) { - final Hit[] a = _search(query); + if (cache.containsKey(query)) { + + if (log.isInfoEnabled()) + log.info("found hits in cache"); + + return cache.get(query).length; + + } else { + + if (log.isInfoEnabled()) + log.info("did not find hits in cache"); + + } + + // tokenize the query. + final TermFrequencyData<V> qdata = tokenize(query); + + // No terms after stopword extraction + if (qdata == null) { + + cache.put(query, new Hit[] {}); + + return 0; + + } + + /* + * We can run an optimized version of this (just a quick range count) + * but only if the caller does not care about exact match and has + * not specified a regex. + */ + if (qdata.distinctTermCount() == 1 && + !query.isMatchExact() && query.getMatchRegex() == null) { + + final boolean prefixMatch = query.isPrefixMatch(); + + final Map.Entry<String, ITermMetadata> e = qdata.getSingletonEntry(); + + final String termText = e.getKey(); + + final ITermMetadata md = e.getValue(); + + final CountIndexTask<V> task1 = new CountIndexTask<V>(termText, 0, 1, + prefixMatch, md.getLocalTermWeight(), this); + + return (int) task1.getRangeCount(); + + } else { + + final Hit<V>[] a = _search(query); + + return a.length; + + } - return a.length; - } - public Hit<V>[] _search(final FullTextQuery q) { + protected TermFrequencyData<V> tokenize(final FullTextQuery query) { - final String query = q.getQuery(); - final String languageCode = q.getLanguageCode(); - final boolean prefixMatch = q.isPrefixMatch(); - final double minCosine = q.getMinCosine(); - final double maxCosine = q.getMaxCosine(); - final int minRank = q.getMinRank(); - final int maxRank = q.getMaxRank(); - final boolean matchAllTerms = q.isMatchAllTerms(); - final boolean matchExact = q.isMatchExact(); - final String regex = q.getMatchRegex(); - long timeout = q.getTimeout(); - final TimeUnit unit = q.getTimeUnit(); + final String q = query.getQuery(); + final String languageCode = query.getLanguageCode(); + final boolean prefixMatch = query.isPrefixMatch(); + // tokenize the query. + final TermFrequencyData<V> qdata; + { + + final TokenBuffer<V> buffer = new TokenBuffer<V>(1, this); + + /* + * If we are using prefix match ('*' operator) then we don't want to + * filter stopwords from the search query. + */ + final boolean filterStopwords = !prefixMatch; + + index(buffer, // + null, // docId // was Long.MIN_VALUE + Integer.MIN_VALUE, // fieldId + languageCode,// + new StringReader(q), // + filterStopwords// + ); + + if (buffer.size() == 0) { + + /* + * There were no terms after stopword extration. 
+ */ + + log.warn("No terms after stopword extraction: query=" + query); + + return null; + + } + + qdata = buffer.get(0); + + qdata.normalize(); + + } + + return qdata; + + } + + public Hit<V>[] _search(final FullTextQuery query) { + + final String queryStr = query.getQuery(); + final String languageCode = query.getLanguageCode(); + final boolean prefixMatch = query.isPrefixMatch(); + final double minCosine = query.getMinCosine(); + final double maxCosine = query.getMaxCosine(); + final int minRank = query.getMinRank(); + final int maxRank = query.getMaxRank(); + final boolean matchAllTerms = query.isMatchAllTerms(); + final boolean matchExact = query.isMatchExact(); + final String regex = query.getMatchRegex(); + long timeout = query.getTimeout(); + final TimeUnit unit = query.getTimeUnit(); + final long begin = System.currentTimeMillis(); // if (languageCode == null) // throw new IllegalArgumentException(); - if (query == null) + if (queryStr == null) throw new IllegalArgumentException(); if (minCosine < 0d || minCosine > 1d) @@ -1002,7 +1104,7 @@ throw new IllegalArgumentException(); if (log.isInfoEnabled()) - log.info("languageCode=[" + languageCode + "], text=[" + query + log.info("languageCode=[" + languageCode + "], text=[" + queryStr + "], minCosine=" + minCosine + ", maxCosine=" + maxCosine + ", minRank=" + minRank @@ -1018,7 +1120,7 @@ } - final FullTextQuery cacheKey = q; + final FullTextQuery cacheKey = query; Hit<V>[] a; @@ -1034,145 +1136,24 @@ if (log.isInfoEnabled()) log.info("did not find hits in cache"); - // tokenize the query. - final TermFrequencyData<V> qdata; - { - - final TokenBuffer<V> buffer = new TokenBuffer<V>(1, this); - - /* - * If we are using prefix match ('*' operator) then we don't want to - * filter stopwords from the search query. - */ - final boolean filterStopwords = !prefixMatch; - - index(buffer, // - null, // docId // was Long.MIN_VALUE - Integer.MIN_VALUE, // fieldId - languageCode,// - new StringReader(query), // - filterStopwords// - ); - - if (buffer.size() == 0) { - - /* - * There were no terms after stopword extration. - */ - - log.warn("No terms after stopword extraction: query=" + query); - - a = new Hit[] {}; - - cache.put(cacheKey, a); - - return a; - - } - - qdata = buffer.get(0); - - qdata.normalize(); - - } - - final IHitCollector<V> hits; - - if (qdata.distinctTermCount() == 1) { - - final Map.Entry<String, ITermMetadata> e = qdata.getSingletonEntry(); - - final String termText = e.getKey(); + // tokenize the query. + final TermFrequencyData<V> qdata = tokenize(query); + + // No terms after stopword extraction + if (qdata == null) { - final ITermMetadata md = e.getValue(); - - final CountIndexTask<V> task1 = new CountIndexTask<V>(termText, 0, 1, prefixMatch, md - .getLocalTermWeight(), this); - - hits = new SingleTokenHitCollector<V>(task1); - - } else { - - final List<CountIndexTask<V>> tasks = new ArrayList<CountIndexTask<V>>( - qdata.distinctTermCount()); - - int i = 0; - for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) { - - final String termText = e.getKey(); - - final ITermMetadata md = e.getValue(); - - tasks.add(new CountIndexTask<V>(termText, i++, qdata.terms.size(), prefixMatch, md - .getLocalTermWeight(), this)); - - } - - hits = new MultiTokenHitCollector<V>(tasks); - - } - - // run the queries. 
- { - - final List<Callable<Object>> tasks = new ArrayList<Callable<Object>>( - qdata.distinctTermCount()); - - int i = 0; - for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) { - - final String termText = e.getKey(); - - final ITermMetadata md = e.getValue(); - - tasks.add(new ReadIndexTask<V>(termText, i++, qdata.terms.size(), - prefixMatch, md.getLocalTermWeight(), this, hits)); - - } - - final ExecutionHelper<Object> executionHelper = new ExecutionHelper<Object>( - getExecutorService(), timeout, unit); - - try { - - final long start = System.currentTimeMillis(); - - executionHelper.submitTasks(tasks); - - if (log.isInfoEnabled()) { - final long readTime = System.currentTimeMillis() - start; - log.info("read time: " + readTime); - } - - } catch (InterruptedException ex) { - - if (log.isInfoEnabled()) { - // TODO Should we wrap and toss this interrupt instead? - log.info("Interrupted - only partial results will be returned."); - } - - /* - * Yes, let's toss it. We were getting into a situation - * where the ExecutionHelper above received an interrupt - * but we still went through the heavy-weight filtering - * operations below (matchExact or matchRegex). - */ - throw new RuntimeException(ex); - - } catch (ExecutionException ex) { - - throw new RuntimeException(ex); - - } - - } - - a = hits.getHits(); - + cache.put(cacheKey, a = new Hit[] {}); + + return a; + + } + + a = executeQuery(qdata, prefixMatch, timeout, unit); + if (a.length == 0) { log.info("No hits: languageCode=[" + languageCode + "], query=[" - + query + "]"); + + queryStr + "]"); cache.put(cacheKey, a); @@ -1223,14 +1204,14 @@ */ if (matchExact) { - a = matchExact(a, query); + a = matchExact(a, queryStr); } if (a.length == 0) { log.warn("No hits after matchAllTerms pruning: languageCode=[" + languageCode + "], query=[" - + query + "]"); + + queryStr + "]"); cache.put(cacheKey, a); @@ -1260,7 +1241,7 @@ if (a.length == 0) { log.warn("No hits after regex pruning: languageCode=[" + languageCode + "], query=[" - + query + "], regex=[" + regex + "]"); + + queryStr + "], regex=[" + regex + "]"); cache.put(cacheKey, a); @@ -1299,6 +1280,27 @@ } + /* + * Take a slice of the hits based on min/max cosine and min/max rank. 
+ */ + a = slice(query, a); + + final long elapsed = System.currentTimeMillis() - begin; + + if (log.isInfoEnabled()) + log.info("Done: " + a.length + " hits in " + elapsed + "ms"); + + return a; + + } + + protected Hit<V>[] slice(final FullTextQuery query, Hit<V>[] a) { + + final double minCosine = query.getMinCosine(); + final double maxCosine = query.getMaxCosine(); + final int minRank = query.getMinRank(); + final int maxRank = query.getMaxRank(); + // if (log.isDebugEnabled()) { // log.debug("before min/max cosine/rank pruning:"); // for (Hit<V> h : a) @@ -1422,13 +1424,106 @@ } - final long elapsed = System.currentTimeMillis() - begin; + return a; - if (log.isInfoEnabled()) - log.info("Done: " + a.length + " hits in " + elapsed + "ms"); + } + + protected Hit<V>[] executeQuery(final TermFrequencyData<V> qdata, + final boolean prefixMatch, final long timeout, final TimeUnit unit) { + + final IHitCollector<V> hits; + + if (qdata.distinctTermCount() == 1) { + + final Map.Entry<String, ITermMetadata> e = qdata.getSingletonEntry(); + + final String termText = e.getKey(); + + final ITermMetadata md = e.getValue(); - return a; + final CountIndexTask<V> task1 = new CountIndexTask<V>(termText, 0, 1, + prefixMatch, md.getLocalTermWeight(), this); + + hits = new SingleTokenHitCollector<V>(task1); + + } else { + + final List<CountIndexTask<V>> tasks = new ArrayList<CountIndexTask<V>>( + qdata.distinctTermCount()); + + int i = 0; + for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) { + + final String termText = e.getKey(); + + final ITermMetadata md = e.getValue(); + + tasks.add(new CountIndexTask<V>(termText, i++, qdata.terms.size(), + prefixMatch, md.getLocalTermWeight(), this)); + + } + + hits = new MultiTokenHitCollector<V>(tasks); + + } + // run the queries. + { + + final List<Callable<Object>> tasks = new ArrayList<Callable<Object>>( + qdata.distinctTermCount()); + + int i = 0; + for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) { + + final String termText = e.getKey(); + + final ITermMetadata md = e.getValue(); + + tasks.add(new ReadIndexTask<V>(termText, i++, qdata.terms.size(), + prefixMatch, md.getLocalTermWeight(), this, hits)); + + } + + final ExecutionHelper<Object> executionHelper = new ExecutionHelper<Object>( + getExecutorService(), timeout, unit); + + try { + + final long start = System.currentTimeMillis(); + + executionHelper.submitTasks(tasks); + + if (log.isInfoEnabled()) { + final long readTime = System.currentTimeMillis() - start; + log.info("read time: " + readTime); + } + + } catch (InterruptedException ex) { + + if (log.isInfoEnabled()) { + // TODO Should we wrap and toss this interrupt instead? + log.info("Interrupted - only partial results will be returned."); + } + + /* + * Yes, let's toss it. We were getting into a situation + * where the ExecutionHelper above received an interrupt + * but we still went through the heavy-weight filtering + * operations below (matchExact or matchRegex). 
+ */ + throw new RuntimeException(ex); + + } catch (ExecutionException ex) { + + throw new RuntimeException(ex); + + } + + } + + return hits.getHits(); + } /** Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/ReadIndexTask.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/ReadIndexTask.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/ReadIndexTask.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -10,8 +10,6 @@ import com.bigdata.btree.ISimpleSplitHandler; import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleIterator; -import com.bigdata.btree.keys.IKeyBuilder; -import com.bigdata.btree.keys.SuccessorUtil; /** * Procedure reads on the terms index, aggregating data on a per-{@link Hit} @@ -131,12 +129,12 @@ log.debug("queryTerm=" + queryTerm + ", termWeight=" + queryTermWeight); - final Thread t = Thread.currentThread(); +// final Thread t = Thread.currentThread(); while (itr.hasNext()) { // don't test for interrupted on each result -- too much work. - if (nhits % 1000 == 0 && t.isInterrupted()) { + if (nhits % 1000 == 0 && Thread.interrupted()) { // if (log.isInfoEnabled()) log.warn("Interrupted: queryTerm=" + queryTerm + ", nhits=" Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -337,8 +337,8 @@ public boolean hasNext() throws InterruptedException { - // The thread in which this method runs. - final Thread t = Thread.currentThread(); +// // The thread in which this method runs. +// final Thread t = Thread.currentThread(); // when we start looking for a chunk. final long begin = System.nanoTime(); @@ -349,7 +349,7 @@ master.halted(); // interrupted? - if (t.isInterrupted()) { + if (Thread.interrupted()) { throw master.halt(new InterruptedException(toString())); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -268,10 +268,13 @@ */ private class ReaderTask implements Callable<Void> { + @Override public Void call() throws Exception { - final Thread t = Thread.currentThread(); +// final Thread t = Thread.currentThread(); + boolean interrupted = false; + try { /* @@ -299,10 +302,11 @@ if (trace) System.err.print('~'); - if (t.isInterrupted()) { + if (Thread.interrupted()) { // thread interrupted, so we are done. - break; + interrupted = true; + break; // break out of while(true) } @@ -344,10 +348,11 @@ */ // don't call blocking method next() if we were interrupted. - if (t.isInterrupted()) { + if (Thread.interrupted()) { // thread interrupted, so we are done. 
- break; + interrupted = true; + break; // break out of while(true) } @@ -392,7 +397,7 @@ } if (INFO) - log.info("Reader is done."); + log.info("Reader is done: interrupted" + interrupted); return null; @@ -448,7 +453,8 @@ } } - + + @Override public void close() { if (future == null) { Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/util/CSVReader.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/util/CSVReader.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/util/CSVReader.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -71,9 +71,9 @@ */ public class CSVReader implements Iterator<Map<String, Object>> { - protected static final Logger log = Logger.getLogger(CSVReader.class); + private static final Logger log = Logger.getLogger(CSVReader.class); - protected static final boolean INFO = log.isInfoEnabled(); +// protected static final boolean INFO = log.isInfoEnabled(); /** * The #of characters to buffer in the reader. @@ -168,7 +168,7 @@ } - public Header(String name) { + public Header(final String name) { if (name == null) throw new IllegalArgumentException(); @@ -191,13 +191,13 @@ * * @return The parsed value. */ - public Object parseValue(String text) { + public Object parseValue(final String text) { for (int i = 0; i < formats.length; i++) { try { - Format f = formats[i]; + final Format f = formats[i]; if (f instanceof DateFormat) { @@ -229,23 +229,41 @@ /** * Equal if the headers have the same data. */ - public boolean equals(Header o) { - - if(this==o) return true; - - return name.equals(o.name); - + @Override + public boolean equals(final Object o) { + + if (this == o) + return true; + + if (!(o instanceof Header)) { + + return false; + + } + + return name.equals(((Header) o).name); + } +// public boolean equals(final Header o) { +// +// if(this==o) return true; +// +// return name.equals(o.name); +// +// } + /** * Based on the header name. 
*/ + @Override public int hashCode() { return name.hashCode(); } + @Override public String toString() { return name; @@ -293,7 +311,8 @@ */ protected Header[] headers; - public CSVReader(InputStream is, String charSet) throws IOException { + public CSVReader(final InputStream is, final String charSet) + throws IOException { if (is == null) throw new IllegalArgumentException(); @@ -306,7 +325,7 @@ } - public CSVReader(Reader r) throws IOException { + public CSVReader(final Reader r) throws IOException { if (r == null) throw new IllegalArgumentException(); @@ -340,9 +359,9 @@ } - public boolean setSkipBlankLines(boolean skipBlankLines) { + public boolean setSkipBlankLines(final boolean skipBlankLines) { - boolean tmp = this.skipBlankLines; + final boolean tmp = this.skipBlankLines; this.skipBlankLines = skipBlankLines; @@ -356,9 +375,9 @@ } - public boolean setTrimWhitespace(boolean trimWhitespace) { + public boolean setTrimWhitespace(final boolean trimWhitespace) { - boolean tmp = this.trimWhitespace; + final boolean tmp = this.trimWhitespace; this.trimWhitespace = trimWhitespace; @@ -384,10 +403,11 @@ } - public long setTailDelayMillis(long tailDelayMillis) { - - if(tailDelayMillis<0) throw new IllegalArgumentException(); - + public long setTailDelayMillis(final long tailDelayMillis) { + + if (tailDelayMillis < 0) + throw new IllegalArgumentException(); + long tmp = this.tailDelayMillis; this.tailDelayMillis = tailDelayMillis; @@ -396,9 +416,11 @@ } + @Override public boolean hasNext() { - if(exhausted) return false; + if (exhausted) + return false; if (line != null) { @@ -406,17 +428,19 @@ } - final Thread currentThread = Thread.currentThread(); +// final Thread currentThread = Thread.currentThread(); try { while (true) { - if(currentThread.isInterrupted()) { + if (Thread.interrupted()) { - if(INFO) + if (log.isInfoEnabled()) log.info("Interrupted"); + exhausted = true; + return false; } @@ -469,6 +493,7 @@ } + @Override public Map<String, Object> next() { if ... [truncated message content] |
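The isInterrupted() -> Thread.interrupted() substitutions in rev 8121 hinge on a well-defined difference between the two calls: Thread#isInterrupted() only reads a thread's interrupt status, while the static Thread.interrupted() reports and then CLEARS the status of the current thread. A minimal, self-contained sketch (not from the commit; the class name is invented for illustration):

public class InterruptCheckDemo {

    public static void main(final String[] args) throws InterruptedException {

        final Thread worker = new Thread(new Runnable() {
            @Override
            public void run() {
                // Polling pattern as in ReadIndexTask and CSVReader.hasNext().
                while (true) {
                    if (Thread.interrupted()) {
                        // Note: interrupt status of thread was cleared, so a
                        // second check on the same thread now reports false.
                        System.out.println("still interrupted? "
                                + Thread.currentThread().isInterrupted());
                        return;
                    }
                }
            }
        });

        worker.start();

        worker.interrupt();

        worker.join(); // prints: still interrupted? false

    }

}

Because the status is cleared, code that must still report or propagate the interrupt has to capture it first, which is what the new local "interrupted" flag in ClientAsynchronousIterator.ReaderTask does before breaking out of its read loop.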
From: <tho...@us...> - 2014-04-15 14:16:19
Revision: 8122 http://sourceforge.net/p/bigdata/code/8122 Author: thompsonbry Date: 2014-04-15 14:16:12 +0000 (Tue, 15 Apr 2014) Log Message: ----------- Reconciled changes in the main development branch into the HA1/HA5 branch in preparation for merge back to the main development branch. See #722 and #723. Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java Removed Paths: ------------- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -142,7 +142,7 @@ * * @see <a href="http://trac.bigdata.com/ticket/721#comment:10"> HA1 </a> */ - private final byte[] s_zeros = new byte[256]; + static private final byte[] s_zeros = new byte[256]; /** * 
Buffer a write. Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -310,7 +310,7 @@ /** * When a record is used as a read cache then the readCount is - * maintained as a metric on its access. ???This could be used to + * maintained as a metric on its access. This could be used to * determine eviction/compaction. * <p> * Note: volatile to guarantee visibility of updates. Might do better Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1165,7 +1165,7 @@ */ if (flush) { /* - * Send out the full cache block. FIXME Why are we not calling sendAddressMetadata() here? + * Send out the full cache block. */ writeCacheBlock(curCompactingCache); addClean(curCompactingCache, true/* addFirst */); @@ -1245,6 +1245,8 @@ * @throws InterruptedException * @throws ExecutionException * @throws IOException + * + * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a> */ private void sendAddressMetadata(final WriteCache cache) throws IllegalStateException, InterruptedException, @@ -1345,19 +1347,14 @@ private void writeCacheBlock(final WriteCache cache) throws InterruptedException, ExecutionException, IOException { -// /* -// * IFF HA -// * -// * TODO isHA should be true even if the quorum is not highly -// * available since there still could be other services in the write -// * pipeline (e.g., replication to an offline HAJournalServer prior -// * to changing over into an HA3 quorum or off-site replication). The -// * unit tests need to be updated to specify [isHighlyAvailable] for -// * ALL quorum based test runs. -// */ -// final boolean isHA = quorum != null && quorum.isHighlyAvailable(); - - // IFF HA and this is the quorum leader. + /** + * IFF HA and this is the quorum leader. + * + * Note: This is true for HA1 as well. The code path enabled by this + * is responsible for writing the HALog files. + * + * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a> + */ final boolean isHALeader = quorum != null && quorum.getClient().isLeader(quorumToken); @@ -1441,6 +1438,12 @@ */ quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate()); + /* + * TODO Do we want to always support the replication code path + * when a quorum exists (that is, also for HA1) in case there + * are pipeline listeners that are not HAJournalServer + * instances? E.g., for offsite replication? + */ if (quorum.replicationFactor() > 1) { // ASYNC MSG RMI + NIO XFER. 
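The writeCacheBlock() changes above are easiest to read as two independent gates: the HALog write now keys off quorum leadership alone, so it also runs for a singleton (HA1) quorum, while pipeline replication still keys off the replication factor. A condensed sketch under those assumptions; the interface and names below are invented for illustration and are not the actual quorum API:

// Hypothetical condensation of the HA1-aware leader logic.
class WriteCacheFlushSketch {

    interface LeaderDuties {

        void logWriteCacheBlock(byte[] block); // always on the leader, even for HA1.

        void replicateToPipeline(byte[] block); // only when k > 1.

    }

    static void flush(final boolean isLeader, final int replicationFactor,
            final byte[] block, final LeaderDuties duties) {

        if (!isLeader)
            return; // followers receive the block over the write pipeline instead.

        // The code path that produces the HALog files needed for online
        // backup and point-in-time restore (ticket #721).
        duties.logWriteCacheBlock(block);

        if (replicationFactor > 1) {

            // ASYNC MSG RMI + NIO XFER in the real service.
            duties.replicateToPipeline(block);

        }

    }

    public static void main(final String[] args) {

        final LeaderDuties duties = new LeaderDuties() {
            @Override
            public void logWriteCacheBlock(final byte[] b) {
                System.out.println("HALog block written");
            }
            @Override
            public void replicateToPipeline(final byte[] b) {
                System.out.println("replicated to pipeline");
            }
        };

        flush(true, 1, new byte[0], duties); // HA1: writes the HALog block only.

        flush(true, 3, new byte[0], duties); // HA3: writes and replicates.

    }

}

Splitting the gates this way is what lets an HA1 deployment keep online backups and point-in-time restore without a write pipeline, which is the scenario exercised by TestHA1SnapshotPolicy2 below.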
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -3429,13 +3429,8 @@ return; if (!quorum.isHighlyAvailable()) { - // FIXME: Find the reason why this delay is needed to pass HA1 snapshot tests -// try { -// Thread.sleep(1000); -// } catch (InterruptedException e) { -// e.printStackTrace(); -// } - return; + // Gather and 2-phase commit are not used in HA1. + return; } /** @@ -3550,9 +3545,11 @@ store._commitRecord = store._getCommitRecord(); if (quorum != null) { - /* + /** * Write the root block on the HALog file, closing out that * file. + * + * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a> */ final QuorumService<HAGlue> localService = quorum.getClient(); if (localService != null) { @@ -3872,7 +3869,7 @@ if (quorum == null || quorum.replicationFactor() == 1) { - // Non-HA mode. + // Non-HA mode (including HA1). cs.commitSimple(); } else { Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -521,7 +521,7 @@ final int nbuffers = 1; final boolean useChecksums = false; - final boolean isHighlyAvailable = true; // for HA1! false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -619,7 +619,7 @@ final int nbuffers = 2; final boolean useChecksums = false; - final boolean isHighlyAvailable = true; // for HA1! false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -672,7 +672,7 @@ */ final double largeRecordRate = 0d; final boolean useChecksums = false; - final boolean isHighlyAvailable = true; // for HA1! false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -717,7 +717,7 @@ final int nbuffers = 6; final boolean useChecksums = true; - final boolean isHighlyAvailable = true; // for HA1! false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -770,7 +770,7 @@ */ final double largeRecordRate = 0d; final boolean useChecksums = true; - final boolean isHighlyAvailable = true; // for HA1! false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -4574,7 +4574,7 @@ // } - log.warn("Starting NSS from " + jettyXml); + log.warn("Starting NSS"); // Start the server. 
jettyServer.start(); @@ -4658,9 +4658,8 @@ if (tmp == null) throw new IllegalStateException("Server is not running"); - final int port = tmp.getConnectors()[0].getLocalPort(); - haLog.warn("Returning NSSPort: " + port); - return port; + return tmp.getConnectors()[0].getLocalPort(); + } /** Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -63,6 +63,13 @@ super(name); } + @Override + protected int replicationFactor() { + + return 3; + + } + /** * Issue HTTP request to a service to take a snapshot. * Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -87,7 +87,6 @@ import com.bigdata.jini.util.JiniUtil; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.StoreState; -import com.bigdata.journal.jini.ha.HAJournalServer.ConfigurationOptions; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; import com.bigdata.quorum.AbstractQuorumClient; import com.bigdata.quorum.AsynchronousQuorumCloseException; @@ -110,7 +109,7 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ -public class AbstractHA3JournalServerTestCase extends +public abstract class AbstractHA3JournalServerTestCase extends AbstractHAJournalServerTestCase implements DiscoveryListener { /** Quorum client used to monitor (or act on) the logical service quorum. */ @@ -133,7 +132,7 @@ * Implementation listens for the death of the child process and can be used * to decide when the child process is no longer executing. */ - public static class ServiceListener implements IServiceListener { + static class ServiceListener implements IServiceListener { private volatile HAGlue haGlue; private volatile ProcessHelper processHelper; @@ -152,13 +151,14 @@ this.haGlue = haGlue; } - @SuppressWarnings("unused") - public HAGlue getHAGlue() { +// @SuppressWarnings("unused") +// public HAGlue getHAGlue() { +// +// return haGlue; +// +// } - return haGlue; - - } - + @Override public void add(final ProcessHelper processHelper) { if (processHelper == null) @@ -1378,11 +1378,33 @@ } - protected String getZKConfigFile() { - return "zkClient.config"; + /** + * Return the zookeeper client configuration file. + */ + final protected String getZKConfigFile() { + + return "zkClient.config"; + } /** + * The as-configured replication factor. + * <p> + * Note: This is defined in the HAJournal.config file, which is where the + * {@link HAJournalServer} gets the correct value. We also need to have the + * replicationFactor on hand for the test suite so we can setup the quorum + * in the test fixture correctly. However, it is difficult to reach the + * appropriate HAJournal.config file from the text fixture during + * {@link #setUp()}. 
Therefore, for the test setup, this is achieved by + * overriding this abstract method in the test class. + */ + protected int replicationFactor() { + + return 3; + + } + + /** * Return Zookeeper quorum that can be used to reflect (or act on) the * distributed quorum state for the logical service. * @@ -1407,7 +1429,7 @@ // Note: Save reference. this.zookeeper = new ZooKeeper(zoohosts, sessionTimeout, new Watcher() { @Override - public void process(WatchedEvent event) { + public void process(final WatchedEvent event) { if (log.isInfoEnabled()) log.info(event); } @@ -1457,9 +1479,19 @@ logicalServiceZPath = logicalServiceZPathPrefix + "/" + logicalServiceId; - final int replicationFactor = (Integer) config.getEntry( - ZookeeperClientConfig.Options.NAMESPACE, - ConfigurationOptions.REPLICATION_FACTOR, Integer.TYPE); + /** + * Note: This is defined in the HAJournal.config file, which is where + * the HAJournalServer gets the correct value. + * + * However, we also need to have the replicationFactor on hand for the + * test suite so we can setup the quorum in the test fixture correctly. + */ + final int replicationFactor = replicationFactor(); +// { +// replicationFactor = (Integer) config.getEntry( +// ConfigurationOptions.COMPONENT, +// ConfigurationOptions.REPLICATION_FACTOR, Integer.TYPE); +// } // if (!zka.awaitZookeeperConnected(10, TimeUnit.SECONDS)) { // @@ -1565,7 +1597,7 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ - public abstract class StartServerTask implements Callable<HAGlue> { + abstract class StartServerTask implements Callable<HAGlue> { private final String name; private final String configName; Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; import java.io.File; @@ -16,17 +39,13 @@ import com.bigdata.ha.HAGlue; import com.bigdata.jini.start.IServiceListener; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownATask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownBTask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownCTask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownTask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ServiceListener; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartATask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartBTask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartCTask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartServerTask; import com.bigdata.quorum.AsynchronousQuorumCloseException; +/** + * Test suite for HA5. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ public class AbstractHA5JournalServerTestCase extends AbstractHA3JournalServerTestCase { @@ -53,10 +72,6 @@ protected final int D_JETTY_PORT = C_JETTY_PORT + 1; protected final int E_JETTY_PORT = D_JETTY_PORT + 1; - protected String getZKConfigFile() { - return "zkClient5.config"; // 5 stage pipeline - } - /** * These {@link IServiceListener}s are used to reliably detect that the * corresponding process starts and (most importantly) that it is really @@ -88,6 +103,13 @@ return new File(getServiceDirE(), "HALog"); } + @Override + protected int replicationFactor() { + + return 5; + + } + /** * Start A then B then C. As each service starts, this method waits for that * service to appear in the pipeline in the proper position. @@ -141,7 +163,7 @@ } /** - * Start of 3 HA services (this happens in the ctor). + * Start of 5 HA services (this happens in the ctor). * * @param sequential * True if the startup should be sequential or false if @@ -362,6 +384,7 @@ super(name); } + @Override protected void destroyAll() throws AsynchronousQuorumCloseException, InterruptedException, TimeoutException { /** Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1259,35 +1259,34 @@ } - /** - * The effective name for this test as used to name the directories in which - * we store things. - * - * TODO If there are method name collisions across the different test - * classes then the test suite name can be added to this. Also, if there are - * file naming problems, then this value can be munged before it is - * returned. - */ - private final String effectiveTestFileName = getClass().getSimpleName() - + "." + getName(); +// /** +// * The effective name for this test as used to name the directories in which +// * we store things. 
+// * +// * TODO If there are method name collisions across the different test +// * classes then the test suite name can be added to this. Also, if there are +// * file naming problems, then this value can be munged before it is +// * returned. +// */ +// private final String effectiveTestFileName = getClass().getSimpleName() +// + "." + getName(); +// +// /** +// * The directory that is the parent of each {@link HAJournalServer}'s +// * individual service directory. +// */ +// protected File getTestDir() { +// return new File(TGT_PATH, getEffectiveTestFileName()); +// } +// +// /** +// * The effective name for this test as used to name the directories in which +// * we store things. +// */ +// protected String getEffectiveTestFileName() { +// +// return effectiveTestFileName; +// +// } - /** - * The directory that is the parent of each {@link HAJournalServer}'s - * individual service directory. - */ - protected File getTestDir() { - return new File(TGT_PATH, getEffectiveTestFileName()); - } - - /** - * The effective name for this test as used to name the directories in which - * we store things. - */ - protected String getEffectiveTestFileName() { - - return effectiveTestFileName; - - } - - } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config 2014-04-15 14:16:12 UTC (rev 8122) @@ -67,7 +67,7 @@ private static haPort = ConfigMath.add(9090,3); // The #of services in the write pipeline. - private static replicationFactor = 5; + private static replicationFactor = 5; // Note: overridden in the HA5 test suites. // The logical service identifier shared by all members of the quorum. private static logicalServiceId = System.getProperty("test.logicalServiceId","CI-HAJournal-1"); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config 2014-04-15 14:16:12 UTC (rev 8122) @@ -67,7 +67,7 @@ private static haPort = ConfigMath.add(9090,4); // The #of services in the write pipeline. - private static replicationFactor = 5; + private static replicationFactor = 5; // Note: overridden in the HA5 test suites. // The logical service identifier shared by all members of the quorum. 
private static logicalServiceId = System.getProperty("test.logicalServiceId","CI-HAJournal-1"); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -75,6 +75,13 @@ super(name); } + @Override + protected int replicationFactor() { + + return 3; + + } + /** * Complex hack to override the {@link HAJournal} properties. * Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -74,6 +74,11 @@ // Basic tests for a single HAJournalServer (quorum does not meet) suite.addTestSuite(TestHAJournalServer.class); + // HA1 test suite. + suite.addTestSuite(TestHA1JournalServer.class); + suite.addTestSuite(TestHA1SnapshotPolicy.class); + suite.addTestSuite(TestHA1SnapshotPolicy2.class); + // HA2 test suite (k=3, but only 2 services are running). suite.addTestSuite(TestHA2JournalServer.class); @@ -108,17 +113,16 @@ // Verify ability to override the HAJournal implementation class. suite.addTestSuite(TestHAJournalServerOverride.class); - // Test suite of longer running stress tests for an HA3 cluster. - suite.addTestSuite(StressTestHA3JournalServer.class); - - // Test suite of longer running stress tests for an HA5 cluster. + // HA5 test suite. suite.addTestSuite(TestHA5JournalServer.class); suite.addTestSuite(TestHA5JournalServerWithHALogs.class); - // Test suite of longer running stress tests for an HA1 cluster. - suite.addTestSuite(TestHA1JournalServer.class); - suite.addTestSuite(TestHA1SnapshotPolicy.class); - suite.addTestSuite(TestHA1SnapshotPolicy2.class); + /* + * Stress tests. + */ + + // Test suite of longer running stress tests for an HA3 cluster. + suite.addTestSuite(StressTestHA3JournalServer.class); return suite; Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; import java.util.concurrent.TimeUnit; @@ -3,10 +26,13 @@ import java.util.concurrent.TimeoutException; -import com.bigdata.ha.HAGlue; -import com.bigdata.ha.HAStatusEnum; - import net.jini.config.Configuration; +import com.bigdata.ha.HAGlue; +/** + * Test suite for HA1. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ public class TestHA1JournalServer extends AbstractHA3JournalServerTestCase { @@ -27,15 +53,11 @@ "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", // "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"", "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", - "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=1", + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="+replicationFactor(), }; } - protected String getZKConfigFile() { - return "zkClient1.config"; // 1 stage pipeline - } - public TestHA1JournalServer() { } @@ -43,40 +65,47 @@ super(name); } + @Override + protected int replicationFactor() { + + return 1; + + } + public void testStartA() throws Exception { - doStartA(); + doStartA(); } - + protected void doStartA() throws Exception { try { - quorum.awaitQuorum(awaitQuorumTimeout, - TimeUnit.MILLISECONDS); - - fail("HA1 requires quorum of 1!"); + quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + + fail("HA1 requires quorum of 1!"); } catch (TimeoutException te) { - // expected + // expected } // Start 1 service. final HAGlue serverA = startA(); - + // this should succeed final long token = quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); - + assertEquals(token, awaitFullyMetQuorum()); - + final HAGlue leader = quorum.getClient().getLeader(token); - + assertEquals(serverA, leader); } - + public void testSimpleTransaction() throws Exception { - doStartA(); - + + doStartA(); + serverA.awaitHAReady(2, TimeUnit.SECONDS); - + /* * Awaiting HAReady is not sufficient since the service may still * writing the initial transaction. @@ -85,30 +114,27 @@ * status of a new journal being ready too soon to process an NSS * request */ - - awaitCommitCounter(1, new HAGlue[] { serverA}); - - // Thread.sleep(100); - - // serverA. 
- - log.warn("Calling SimpleTransaction"); - simpleTransaction(); - - awaitCommitCounter(2, new HAGlue[] { serverA}); + + awaitCommitCounter(1, new HAGlue[] { serverA }); + + simpleTransaction(); + + awaitCommitCounter(2, new HAGlue[] { serverA }); + } - + public void testMultiTransaction() throws Exception { - doStartA(); - - awaitCommitCounter(1, new HAGlue[] { serverA}); + doStartA(); + + awaitCommitCounter(1, new HAGlue[] { serverA }); // Thread.sleep(1000); - + final int NTRANS = 10; - for (int t = 0; t < NTRANS; t++) { - simpleTransaction(); - } + for (int t = 0; t < NTRANS; t++) { + simpleTransaction(); + } - awaitCommitCounter(NTRANS+1, new HAGlue[] { serverA}); + awaitCommitCounter(NTRANS + 1, new HAGlue[] { serverA }); + } + } -} Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -30,10 +30,13 @@ super(name); } - protected String getZKConfigFile() { - return "zkClient1.config"; // 1 stage pipeline + @Override + protected int replicationFactor() { + + return 1; + } - + /** * {@inheritDoc} * <p> @@ -60,7 +63,7 @@ "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.DefaultSnapshotPolicy("+neverRun+",0)", // "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", // "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", - "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=1", + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="+replicationFactor(), }; } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,13 +1,37 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; -import java.util.concurrent.TimeUnit; - import net.jini.config.Configuration; import com.bigdata.ha.HAGlue; -import com.bigdata.ha.halog.HALogWriter; -import com.bigdata.ha.msg.HARootBlockRequest; +/** + * Test suite for HA1 online backups and point in time restore. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ public class TestHA1SnapshotPolicy2 extends AbstractHA3BackupTestCase { public TestHA1SnapshotPolicy2() { @@ -17,11 +41,11 @@ super(name); } - /** How long to wait for snapshots to appear. */ - private final long awaitSnapshotMillis = 5000; - - protected String getZKConfigFile() { - return "zkClient1.config"; // 1 stage pipeline + @Override + protected int replicationFactor() { + + return 1; + } /** @@ -46,7 +70,7 @@ "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.DefaultSnapshotPolicy("+neverRun+",0)", "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", - "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=1" + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="+replicationFactor() }; } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -181,9 +181,6 @@ assertDigestsEquals(new HAGlue[] { serverA, serverB }); // Verify can not write on follower. - log.warn("ServerA port: " + serverA.getNSSPort()); - log.warn("ServerB port: " + serverB.getNSSPort()); - assertWriteRejected(serverB); // Start 3rd service. Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; import java.util.concurrent.Callable; @@ -10,9 +33,12 @@ import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAStatusEnum; import com.bigdata.ha.msg.HARootBlockRequest; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ABC; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.LargeLoadTask; +/** + * HA5 test suite. 
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
 public class TestHA5JournalServer extends AbstractHA5JournalServerTestCase {
 
     /**
@@ -31,11 +57,18 @@
                 "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()",
//                "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"",
                 "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true",
-                "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=5",
+                "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="+replicationFactor(),
         };
     }
+
+    @Override
+    protected int replicationFactor() {
+
+        return 5;
+
+    }
+
     public TestHA5JournalServer() {
     }
@@ -49,9 +82,11 @@
      * @throws Exception
      */
     public void testStartABC_DE() throws Exception {
-        doStartABC_DE();
+
+        doStartABC_DE();
+
     }
-
+
     protected void doStartABC_DE() throws Exception {
 
         // Start 3 services.
@@ -171,117 +206,115 @@
      * HA5 is fully met after 5 services are started simultaneously
      */
     public void testABCDESimultaneous() throws Exception {
-
-        final ABCDE startup = new ABCDE(false);
-
-        awaitFullyMetQuorum();
-
+
+        final ABCDE startup = new ABCDE(false);
+
+        awaitFullyMetQuorum();
+
         startup.assertDigestsEqual();
     }
-
+
     /**
      * HA5 is fully met after 5 services are started sequentially
      */
     public void testABCDESequential() throws Exception {
-
-        final ABCDE startup = new ABCDE(true);
-
-        awaitFullyMetQuorum();
-
+
+        final ABCDE startup = new ABCDE(true);
+
+        awaitFullyMetQuorum();
+
         startup.assertDigestsEqual();
     }
-
+
     /**
      * HA5 remains met with 1 service failure
      */
     public void testABCDEShutdownC() throws Exception {
-
-        final ABCDE startup = new ABCDE(true);
-
-        final long token = awaitFullyMetQuorum();
-
+
+        final ABCDE startup = new ABCDE(true);
+
+        final long token = awaitFullyMetQuorum();
+
         startup.assertDigestsEqual();
-
+
         shutdownC();
-
-        awaitPipeline(new HAGlue[] {serverA, serverB, serverD, serverE});
-
+
+        awaitPipeline(new HAGlue[] { serverA, serverB, serverD, serverE });
+
         assertEquals(token, awaitMetQuorum());
     }
-
+
     /**
      * HA5 remains met with 2 service failures
      */
     public void testABCDEShutdownBD() throws Exception {
-
-        final ABCDE startup = new ABCDE(true);
-
-        final long token = awaitFullyMetQuorum();
-
+
+        final ABCDE startup = new ABCDE(true);
+
+        final long token = awaitFullyMetQuorum();
+
         startup.assertDigestsEqual();
-
+
         shutdownB();
         shutdownD();
-
-        awaitPipeline(new HAGlue[] {serverA, serverC, serverE});
-
+
+        awaitPipeline(new HAGlue[] { serverA, serverC, serverE });
+
         assertEquals(token, awaitMetQuorum());
     }
-
+
     /**
-     * HA5 breaks with 3 service failures and re-meets when one
-     * is restarted
+     * HA5 breaks with 3 service failures and re-meets when one is restarted
      */
     public void testABCDEShutdownBCD() throws Exception {
-
-        final ABCDE startup = new ABCDE(true);
-
-        final long token = awaitFullyMetQuorum();
-
+
+        final ABCDE startup = new ABCDE(true);
+
+        final long token = awaitFullyMetQuorum();
+
         startup.assertDigestsEqual();
-
+
         shutdownB();
         shutdownC();
         shutdownD();
-
+
         // Non-deterministic pipeline order
         // awaitPipeline(new HAGlue[] {serverA, serverE});
-
+
         try {
-            awaitMetQuorum();
-            fail("Quorum should not be met");
+            awaitMetQuorum();
+            fail("Quorum should not be met");
         } catch (TimeoutException te) {
-            // expected
+            // expected
         }
-
+
         startC();
-
+
         assertFalse(token == awaitMetQuorum());
     }
-
+
     /**
-     * HA5 breaks when leader fails, meets on new token
-     * then fully meets on same token when previous leader
-     * is restarted
+     * HA5 breaks when leader fails, meets on new token then fully meets on same
+     * token when previous leader is restarted
      */
     public void testABCDEShutdownLeader() throws Exception {
-
-        final ABCDE startup = new ABCDE(true);
-
-        final long token = awaitFullyMetQuorum();
-
+
+        final ABCDE startup = new ABCDE(true);
+
+        final long token = awaitFullyMetQuorum();
+
         startup.assertDigestsEqual();
-
+
         shutdownA();
-
+
         // pipeline order is non-deterministic
-
+
         final long token2 = awaitMetQuorum();
-
-        assertFalse(token==token2);
-
+
+        assertFalse(token == token2);
+
         startA();
-
+
         assertTrue(token2 == awaitFullyMetQuorum());
     }
 
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java	2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java	2014-04-15 14:16:12 UTC (rev 8122)
@@ -1,3 +1,26 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+     SYSTAP, LLC
+     4501 Tower Road
+     Greensboro, NC 27410
+     lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
 package com.bigdata.journal.jini.ha;
 
 import java.io.File;
@@ -3,13 +26,16 @@
 import java.util.Calendar;
 
+import net.jini.config.Configuration;
+
 import com.bigdata.ha.HAGlue;
 import com.bigdata.ha.halog.HALogReader;
 import com.bigdata.ha.halog.IHALogReader;
 import com.bigdata.journal.CommitCounterUtility;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ABC;
 
-import net.jini.config.Configuration;
-import junit.framework.TestCase;
-
+/**
+ * HA5 test suite.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
 public class TestHA5JournalServerWithHALogs extends AbstractHA5JournalServerTestCase {
@@ -46,6 +72,13 @@
 
     }
 
+    @Override
+    protected int replicationFactor() {
+
+        return 5;
+
+    }
+
     /**
      * {@inheritDoc}
      * <p>
@@ -67,7 +100,7 @@
         return new String[]{
                 "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy()",
                 "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.DefaultSnapshotPolicy("+neverRun+",0)",
-                "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=5",
+                "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="+replicationFactor(),
         };
     }
 
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties	2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties	2014-04-15 14:16:12 UTC (rev 8122)
@@ -11,7 +11,7 @@
 #log4j.logger.com.bigdata.service.jini.lookup=ALL
 #log4j.logger.com.bigdata.quorum=INFO
 log4j.logger.com.bigdata.quorum.zk=INFO
-log4j.logger.com.bigdata.io.writecache=INFO
+#log4j.logger.com.bigdata.io.writecache=INFO
 #log4j.logger.com.bigdata.zookeeper=INFO
 #log4j.logger.com.bigdata.zookeeper.ZooHelper=ALL
 log4j.logger.com.bigdata.rdf.internal.LexiconConfiguration=FATAL
 
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config	2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config	2014-04-15 14:16:12 UTC (rev 8122)
@@ -30,9 +30,6 @@
 
    private static fedname = "benchmark";
 
-   // The #of services in the write pipeline.
-   private static replicationFactor = 3;
-
    /* The logical service identifier shared by all members of the quorum.
    *
    * Note: The test fixture ignores this value. For the avoidance of
 
Deleted: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config	2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config	2014-04-15 14:16:12 UTC (rev 8122)
@@ -1,91 +0,0 @@
-/* Zookeeper client only configuration.
- */
-import java.io.File;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.util.UUID;
-
-import com.bigdata.util.NV;
-import com.bigdata.util.config.NicUtil;
-import com.bigdata.journal.Options;
-import com.bigdata.journal.BufferMode;
-import com.bigdata.journal.jini.ha.HAJournal;
-import com.bigdata.jini.lookup.entry.*;
-import com.bigdata.service.IBigdataClient;
-import com.bigdata.service.AbstractTransactionService;
-import com.bigdata.service.jini.*;
-import com.bigdata.service.jini.lookup.DataServiceFilter;
-import com.bigdata.service.jini.master.ServicesTemplate;
-import com.bigdata.jini.start.config.*;
-import com.bigdata.jini.util.ConfigMath;
-
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Id;
-
-/*
- * Globals.
- */
-bigdata {
-
-   private static fedname = "benchmark";
-
-   // The #of services in the write pipeline.
-   private static replicationFactor = 1;
-
-   /* The logical service identifier shared by all members of the quorum.
-    *
-    * Note: The test fixture ignores this value. For the avoidance of
-    * doubt, the value is commented out.
-    */
-   //private static logicalServiceId = "CI-HAJournal-1";
-
-   // zookeeper
-   static private sessionTimeout = (int)ConfigMath.s2ms(20);
-
-}
-
-/*
- * Zookeeper client configuration.
- */
-org.apache.zookeeper.ZooKeeper {
-
-    /* Root znode for the federation instance. */
-    zroot = "/" + bigdata.fedname;
-
-    /* A comma separated list of host:port pairs, where the port is
-     * the CLIENT port for the zookeeper server instance.
-     */
-    // standalone.
-    servers = "localhost:2081";
-    // ensemble
-//  servers = bigdata.zoo1+":2181"
-//      + ","+bigdata.zoo2+":2181"
-//      + ","+bigdata.zoo3+":2181"
-//      ;
-
-    /* Session timeout (optional). */
-    sessionTimeout = bigdata.sessionTimeout;
-
-    /*
-     * ACL for the zookeeper nodes created by the bigdata federation.
-     *
-     * Note: zookeeper ACLs are not transmitted over secure channels
-     * and are placed into plain text Configuration files by the
-     * ServicesManagerServer.
-     */
-    acl = new ACL[] {
-
-        new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone"))
-
-    };
-
-    /*
-     * Note: Normally on the HAJournalServer component. Hacked in the test
-     * suite setup to look at the ZooKeeper component instead.
-     */
-
-    logicalServiceId = bigdata.logicalServiceId;
-
-    replicationFactor = bigdata.replicationFactor;
-}
 
Deleted: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config	2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config	2014-04-15 14:16:12 UTC (rev 8122)
@@ -1,91 +0,0 @@
-/* Zookeeper client only configuration.
- */
-import java.io.File;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.util.UUID;
-
-import com.bigdata.util.NV;
-import com.bigdata.util.config.NicUtil;
-import com.bigdata.journal.Options;
-import com.bigdata.journal.BufferMode;
-import com.bigdata.journal.jini.ha.HAJournal;
-import com.bigdata.jini.lookup.entry.*;
-import com.bigdata.service.IBigdataClient;
-import com.bigdata.service.AbstractTransactionService;
-import com.bigdata.service.jini.*;
-import com.bigdata.service.jini.lookup.DataServiceFilter;
-import com.bigdata.service.jini.master.ServicesTemplate;
-import com.bigdata.jini.start.config.*;
-import com.bigdata.jini.util.ConfigMath;
-
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Id;
-
-/*
- * Globals.
- */
-bigdata {
-
-   private static fedname = "benchmark";
-
-   // The #of services in the write pipeline.
-   private static replicationFactor = 5;
-
-   /* The logical service identifier shared by all members of the quorum.
-    *
-    * Note: The test fixture ignores this value. For the avoidance of
-    * doubt, the value is commented out.
-    */
-   //private static logicalServiceId = "CI-HAJournal-1";
-
-   // zookeeper
-   static private sessionTimeout = (int)ConfigMath.s2ms(20);
-
-}
-
-/*
- * Zookeeper client configuration.
- */
-org.apache.zookeeper.ZooKeeper {
-
-    /* Root znode for the federation instance. */
-    zroot = "/" + bigdata.fedname;
-
-    /* A comma separated list of host:port pairs, where the port is
-     * the CLIENT port for the zookeeper server instance.
-     */
-    // standalone.
-    servers = "localhost:2081";
-    // ensemble
-//  servers = bigdata.zoo1+":2181"
-//      + ","+bigdata.zoo2+":2181"
-//      + ","+bigdata.zoo3+":2181"
-//      ;
-
-    /* Session timeout (optional). */
-    sessionTimeout = bigdata.sessionTimeout;
-
-    /*
-     * ACL for the zookeeper nodes created by the bigdata federation.
-     *
-     * Note: zookeeper ACLs are not transmitted over secure channels
-     * and are placed into plain text Configuration files by the
-     * ServicesManagerServer.
-     */
-    acl = new ACL[] {
-
-        new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone"))
-
-    };
-
-    /*
-     * Note: Normally on the HAJournalServer component. Hacked in the test
-     * suite setup to look at the ZooKeeper component instead.
-     */
-
-    logicalServiceId = bigdata.logicalServiceId;
-
-    replicationFactor = bigdata.replicationFactor;
-}
 
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java	2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java	2014-04-15 14:16:12 UTC (rev 8122)
@@ -184,7 +184,7 @@
      * withstand a failure.
      */
     static public final URI ReplicationFactor = new URIImpl(BDFNS
-            + "replicationCount");
+            + "replicationFactor");
 
     /**
      * An {@link IBigdataFederation}.
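The SD.java hunk above aligns the service-description vocabulary with the configuration property name: the ReplicationFactor term previously minted a URI ending in "replicationCount". A minimal sketch of the corrected constant, assuming a hypothetical namespace value for BDFNS (the real constant is defined elsewhere in SD.java) and the Sesame URIImpl class that file already uses:

import org.openrdf.model.URI;
import org.openrdf.model.impl.URIImpl;

public class SDVocabularySketch {

    // Hypothetical namespace value; the real BDFNS constant lives in SD.java.
    private static final String BDFNS = "http://www.bigdata.com/rdf/sd#";

    // After this commit the local name matches the
    // HAJournalServer.replicationFactor configuration property.
    public static final URI REPLICATION_FACTOR =
            new URIImpl(BDFNS + "replicationFactor");

    public static void main(final String[] args) {
        System.out.println(REPLICATION_FACTOR); // ...sd#replicationFactor
    }
}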
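Both HA5 test classes now override replicationFactor() instead of hard-coding "replicationFactor=5" in their configuration overrides, which also explains why the per-factor zkClient1.config and zkClient5.config files could be deleted: the factor comes from the test class, not from a dedicated config file. A minimal sketch of the pattern, with hypothetical class names standing in for the AbstractHA*JournalServerTestCase hierarchy:

// Sketch only: HASuiteBase and HA5Suite are hypothetical stand-ins.
abstract class HASuiteBase {

    /** The quorum replication factor under test; HA3 is the default. */
    protected int replicationFactor() {
        return 3;
    }

    /** Overrides are derived from replicationFactor(), not a literal. */
    protected String[] getOverrides() {
        return new String[] {
                "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="
                        + replicationFactor(),
        };
    }
}

class HA5Suite extends HASuiteBase {

    @Override
    protected int replicationFactor() {
        return 5; // as in TestHA5JournalServer and TestHA5JournalServerWithHALogs
    }
}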
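The testABCDEShutdown* methods above all follow from simple majority arithmetic: with replication factor k, the quorum remains met while at least (k+1)/2 services are joined, so an HA5 quorum survives one or two failures but breaks on the third; and, as testABCDEShutdownLeader shows, follower failures preserve the quorum token while a leader failure forces a quorum break and a new token. A standalone illustration of that arithmetic (not the bigdata Quorum API):

public class QuorumMajorityDemo {

    /** Joined services needed for a met quorum with replication factor k. */
    static int majority(final int k) {
        return (k + 1) / 2; // 3 for HA5, 2 for HA3
    }

    public static void main(final String[] args) {
        final int k = 5;
        System.out.println(4 >= majority(k)); // testABCDEShutdownC: still met
        System.out.println(3 >= majority(k)); // testABCDEShutdownBD: still met
        System.out.println(2 >= majority(k)); // testABCDEShutdownBCD: breaks
    }
}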