|
From: <mar...@us...> - 2014-04-02 13:13:00
|
Revision: 8026
http://sourceforge.net/p/bigdata/code/8026
Author: martyncutcher
Date: 2014-04-02 13:12:56 +0000 (Wed, 02 Apr 2014)
Log Message:
-----------
Initial commit on creation of HA1_HA5 branch
Modified Paths:
--------------
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
Added Paths:
-----------
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-D.properties
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-E.properties
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config
branches/BIGDATA_MGC_HA1_HA5/branch-notes.txt
Removed Paths:
-------------
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-02 13:03:59 UTC (rev 8025)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-02 13:12:56 UTC (rev 8026)
@@ -3428,8 +3428,8 @@
if (quorum == null)
return;
- if (!quorum.isHighlyAvailable())
- return;
+// if (!quorum.isHighlyAvailable())
+// return;
/**
* CRITICAL SECTION. We need obtain a distributed consensus for the
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-02 13:03:59 UTC (rev 8025)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-02 13:12:56 UTC (rev 8026)
@@ -4574,7 +4574,7 @@
// }
- log.warn("Starting NSS");
+ log.warn("Starting NSS from " + jettyXml);
// Start the server.
jettyServer.start();
@@ -4658,8 +4658,9 @@
if (tmp == null)
throw new IllegalStateException("Server is not running");
- return tmp.getConnectors()[0].getLocalPort();
-
+ final int port = tmp.getConnectors()[0].getLocalPort();
+ haLog.warn("Returning NSSPort: " + port);
+ return port;
}
/**
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-04-02 13:03:59 UTC (rev 8025)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-04-02 13:12:56 UTC (rev 8026)
@@ -1096,6 +1096,8 @@
+ haLogBytesOnDisk//
+ ", journalSize="
+ journalSize//
+ + ", thresholdPercentLogSize="
+ + thresholdPercentLogSize//
+ ", percentLogSize="
+ actualPercentLogSize//
+ "%, takeSnapshot=" + takeSnapshot //
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-02 13:03:59 UTC (rev 8025)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-02 13:12:56 UTC (rev 8026)
@@ -133,7 +133,7 @@
* Implementation listens for the death of the child process and can be used
* to decide when the child process is no longer executing.
*/
- private static class ServiceListener implements IServiceListener {
+ public static class ServiceListener implements IServiceListener {
private volatile HAGlue haGlue;
private volatile ProcessHelper processHelper;
@@ -218,8 +218,12 @@
* The {@link Remote} interfaces for these services (if started and
* successfully discovered).
*/
- private HAGlue serverA = null, serverB = null, serverC = null;
+ protected HAGlue serverA = null;
+ protected HAGlue serverB = null;
+
+ protected HAGlue serverC = null;
+
/**
* {@link UUID}s for the {@link HAJournalServer}s.
*/
@@ -232,17 +236,20 @@
* @see <a href="http://trac.bigdata.com/ticket/730" > Allow configuration
* of embedded NSS jetty server using jetty-web.xml </a>
*/
- private final int A_JETTY_PORT = 8090, B_JETTY_PORT = A_JETTY_PORT + 1,
- C_JETTY_PORT = B_JETTY_PORT + 1;
+ protected final int A_JETTY_PORT = 8090;
+ protected final int B_JETTY_PORT = A_JETTY_PORT + 1;
+ protected final int C_JETTY_PORT = B_JETTY_PORT + 1;
/**
* These {@link IServiceListener}s are used to reliably detect that the
* corresponding process starts and (most importantly) that it is really
* dies once it has been shutdown or destroyed.
*/
- private ServiceListener serviceListenerA = null, serviceListenerB = null;
+ protected ServiceListener serviceListenerA = null;
- private ServiceListener serviceListenerC = null;
+ protected ServiceListener serviceListenerB = null;
+
+ protected ServiceListener serviceListenerC = null;
private LookupDiscoveryManager lookupDiscoveryManager = null;
@@ -1143,14 +1150,14 @@
}
- private void safeShutdown(final HAGlue haGlue, final File serviceDir,
+ void safeShutdown(final HAGlue haGlue, final File serviceDir,
final ServiceListener serviceListener) {
safeShutdown(haGlue, serviceDir, serviceListener, false/* now */);
}
- private void safeShutdown(final HAGlue haGlue, final File serviceDir,
+ protected void safeShutdown(final HAGlue haGlue, final File serviceDir,
final ServiceListener serviceListener, final boolean now) {
if (haGlue == null)
@@ -1368,6 +1375,10 @@
}
+ protected String getZKConfigFile() {
+ return "zkClient.config";
+ }
+
/**
* Return Zookeeper quorum that can be used to reflect (or act on) the
* distributed quorum state for the logical service.
@@ -1382,7 +1393,7 @@
KeeperException, IOException {
final Configuration config = ConfigurationProvider
- .getInstance(new String[] { SRC_PATH + "zkClient.config" });
+ .getInstance(new String[] { SRC_PATH + getZKConfigFile() });
zkClientConfig = new ZookeeperClientConfig(config);
@@ -1551,7 +1562,7 @@
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
*/
- abstract private class StartServerTask implements Callable<HAGlue> {
+ public abstract class StartServerTask implements Callable<HAGlue> {
private final String name;
private final String configName;
Added: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java (rev 0)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java 2014-04-02 13:12:56 UTC (rev 8026)
@@ -0,0 +1,466 @@
+package com.bigdata.journal.jini.ha;
+
+import java.io.File;
+import java.io.IOException;
+import java.rmi.Remote;
+import java.security.DigestException;
+import java.security.NoSuchAlgorithmException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import com.bigdata.ha.HAGlue;
+import com.bigdata.jini.start.IServiceListener;
+import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownATask;
+import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownBTask;
+import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownCTask;
+import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownTask;
+import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ServiceListener;
+import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartATask;
+import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartBTask;
+import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartCTask;
+import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartServerTask;
+import com.bigdata.quorum.AsynchronousQuorumCloseException;
+
+public class AbstractHA5JournalServerTestCase extends
+ AbstractHA3JournalServerTestCase {
+
+ /**
+ * The {@link Remote} interfaces for these services (if started and
+ * successfully discovered).
+ */
+ protected HAGlue serverD = null;
+
+ protected HAGlue serverE = null;
+
+ /**
+ * {@link UUID}s for the {@link HAJournalServer}s.
+ */
+ private UUID serverDId = UUID.randomUUID();
+ private UUID serverEId = UUID.randomUUID();
+
+ /**
+ * The HTTP ports at which the services will respond.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/730" > Allow configuration
+ * of embedded NSS jetty server using jetty-web.xml </a>
+ */
+ protected final int D_JETTY_PORT = C_JETTY_PORT + 1;
+ protected final int E_JETTY_PORT = D_JETTY_PORT + 1;
+
+ protected String getZKConfigFile() {
+ return "zkClient5.config"; // 5 stage pipeline
+ }
+
+ /**
+ * These {@link IServiceListener}s are used to reliably detect that the
+ * corresponding process starts and (most importantly) that it is really
+ * dies once it has been shutdown or destroyed.
+ */
+ protected ServiceListener serviceListenerD = null, serviceListenerE = null;
+
+ protected File getServiceDirD() {
+ return new File(getTestDir(), "D");
+ }
+
+ protected File getServiceDirE() {
+ return new File(getTestDir(), "E");
+ }
+
+ protected File getHAJournalFileD() {
+ return new File(getServiceDirD(), "bigdata-ha.jnl");
+ }
+
+ protected File getHAJournalFileE() {
+ return new File(getServiceDirE(), "bigdata-ha.jnl");
+ }
+
+ protected File getHALogDirD() {
+ return new File(getServiceDirD(), "HALog");
+ }
+
+ protected File getHALogDirE() {
+ return new File(getServiceDirE(), "HALog");
+ }
+
+ /**
+ * Start A then B then C then D then E. As each service starts, this
+ * method waits for that service to appear in the pipeline in the proper position.
+ *
+ * @return The ordered array of services <code>[A, B, C, D, E]</code>
+ */
+ protected HAGlue[] startSequenceABCDE() throws Exception {
+
+ startA();
+ awaitPipeline(new HAGlue[] { serverA });
+
+ startB();
+ awaitPipeline(new HAGlue[] { serverA, serverB });
+
+ startC();
+ awaitPipeline(new HAGlue[] { serverA, serverB, serverC });
+
+ startD();
+ awaitPipeline(new HAGlue[] { serverA, serverB, serverC, serverD });
+
+ startE();
+ awaitPipeline(new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ return new HAGlue[] { serverA, serverB, serverC, serverD, serverE };
+
+ }
+
+ /**
+ * Helper class for simultaneous/sequenced start of 5 HA services.
+ */
+ protected class ABCDE {
+
+ /**
+ * The services.
+ */
+ final HAGlue serverA, serverB, serverC, serverD, serverE;
+
+ /**
+ * Start of 5 HA services (this happens in the ctor).
+ *
+ * @param sequential
+ * True if the startup should be sequential or false
+ * if services should start concurrently.
+ * @throws Exception
+ */
+ public ABCDE(final boolean sequential)
+ throws Exception {
+
+ this(true/* sequential */, true/* newServiceStarts */);
+
+ }
+
+ /**
+ * Start of 5 HA services (this happens in the ctor).
+ *
+ * @param sequential
+ * True if the startup should be sequential or false if
+ * services should start concurrently.
+ * @param newServiceStarts
+ * When <code>true</code> the services are new, the database
+ * should be at <code>commitCounter:=0</code> and the
+ * constructor will check for the implicit create of the
+ * default KB.
+ * @throws Exception
+ */
+ public ABCDE(final boolean sequential, final boolean newServiceStarts)
+ throws Exception {
+
+ if (sequential) {
+
+ final HAGlue[] services = startSequenceABCDE();
+
+ serverA = services[0];
+
+ serverB = services[1];
+
+ serverC = services[2];
+
+ serverD = services[3];
+
+ serverE = services[4];
+
+ } else {
+
+ final List<Callable<HAGlue>> tasks = new LinkedList<Callable<HAGlue>>();
+
+ tasks.add(new StartATask(false/* restart */));
+ tasks.add(new StartBTask(false/* restart */));
+ tasks.add(new StartCTask(false/* restart */));
+ tasks.add(new StartDTask(false/* restart */));
+ tasks.add(new StartETask(false/* restart */));
+
+ // Start all servers in parallel. Wait up to a timeout.
+ final List<Future<HAGlue>> futures = executorService.invokeAll(
+ tasks, 30/* timeout */, TimeUnit.SECONDS);
+
+ serverA = futures.get(0).get();
+
+ serverB = futures.get(1).get();
+
+ serverC = futures.get(2).get();
+
+ serverD = futures.get(3).get();
+
+ serverE = futures.get(4).get();
+
+ }
+
+ // wait for the quorum to fully meet.
+ awaitFullyMetQuorum();
+
+ if(newServiceStarts) {
+ // wait for the initial commit point (KB create).
+ awaitCommitCounter(1L, serverA, serverB, serverC, serverD, serverE);
+ }
+
+ }
+
+ public void shutdownAll() throws InterruptedException,
+ ExecutionException {
+
+ shutdownAll(false/* now */);
+
+ }
+
+ public void shutdownAll(final boolean now) throws InterruptedException,
+ ExecutionException {
+
+ final List<Callable<Void>> tasks = new LinkedList<Callable<Void>>();
+
+ tasks.add(new SafeShutdownATask());
+ tasks.add(new SafeShutdownBTask());
+ tasks.add(new SafeShutdownCTask());
+ tasks.add(new SafeShutdownDTask());
+ tasks.add(new SafeShutdownETask());
+
+ // Start all servers in parallel. Wait up to a timeout.
+ final List<Future<Void>> futures = executorService.invokeAll(
+ tasks, 30/* timeout */, TimeUnit.SECONDS);
+
+ futures.get(0).get();
+ futures.get(1).get();
+ futures.get(2).get();
+ futures.get(3).get();
+ futures.get(4).get();
+
+ }
+
+ public void assertDigestsEqual() throws NoSuchAlgorithmException, DigestException, IOException {
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+ }
+
+ }
+
+ protected HAGlue startD() throws Exception {
+
+ return new StartDTask(false/* restart */).call();
+
+ }
+
+ protected HAGlue startE() throws Exception {
+
+ return new StartETask(false/* restart */).call();
+
+ }
+
+ protected HAGlue restartD() throws Exception {
+
+ return new StartDTask(true/* restart */).call();
+
+ }
+
+ protected HAGlue restartE() throws Exception {
+
+ return new StartETask(true/* restart */).call();
+
+ }
+
+ protected void shutdownD() throws IOException {
+ safeShutdown(serverD, getServiceDirD(), serviceListenerD, true);
+
+ serverD = null;
+ serviceListenerD = null;
+ }
+
+ protected void shutdownE() throws IOException {
+ safeShutdown(serverE, getServiceDirE(), serviceListenerE, true);
+
+ serverE = null;
+ serviceListenerE = null;
+ }
+
+ protected class StartDTask extends StartServerTask {
+
+ public StartDTask(final boolean restart) {
+
+ super("D", "HAJournal-D.config", serverDId, D_JETTY_PORT,
+ serviceListenerD = new ServiceListener(), restart);
+
+ }
+
+ @Override
+ public HAGlue call() throws Exception {
+
+ if (restart) {
+
+ safeShutdown(serverD, getServiceDirD(), serviceListenerD);
+
+ serverD = null;
+
+ }
+
+ return serverD = start();
+
+ }
+
+ }
+
+ protected class StartETask extends StartServerTask {
+
+ public StartETask(final boolean restart) {
+
+ super("E", "HAJournal-E.config", serverEId, E_JETTY_PORT,
+ serviceListenerE = new ServiceListener(), restart);
+
+ }
+
+ @Override
+ public HAGlue call() throws Exception {
+
+ if (restart) {
+
+ safeShutdown(serverE, getServiceDirE(), serviceListenerE);
+
+ serverE = null;
+
+ }
+
+ return serverE = start();
+
+ }
+
+ }
+
+ protected class SafeShutdownDTask extends SafeShutdownTask {
+
+ public SafeShutdownDTask() {
+ this(false/* now */);
+ }
+
+ public SafeShutdownDTask(final boolean now) {
+ super(serverD, getServiceDirC(), serviceListenerD, now);
+ }
+
+ }
+
+ protected class SafeShutdownETask extends SafeShutdownTask {
+
+ public SafeShutdownETask() {
+ this(false/* now */);
+ }
+
+ public SafeShutdownETask(final boolean now) {
+ super(serverE, getServiceDirC(), serviceListenerE, now);
+ }
+
+ }
+ public AbstractHA5JournalServerTestCase() {
+ }
+
+ public AbstractHA5JournalServerTestCase(final String name) {
+ super(name);
+ }
+
+ protected void destroyAll() throws AsynchronousQuorumCloseException,
+ InterruptedException, TimeoutException {
+ /**
+ * The most reliable tear down is in reverse pipeline order.
+ *
+ * This may not be necessary long term but for now we want to avoid
+ * destroying the leader first since it can lead to problems as
+ * followers attempt to reform
+ */
+ final HAGlue leader;
+ final File leaderServiceDir;
+ final ServiceListener leaderListener;
+ if (quorum.isQuorumMet()) {
+ final long token = quorum.awaitQuorum(awaitQuorumTimeout,
+ TimeUnit.MILLISECONDS);
+ /*
+ * Note: It is possible to resolve a proxy for a service that has
+ * been recently shutdown or destroyed. This is effectively a data
+ * race.
+ */
+ final HAGlue t = quorum.getClient().getLeader(token);
+ if (t.equals(serverA)) {
+ leader = t;
+ leaderServiceDir = getServiceDirA();
+ leaderListener = serviceListenerA;
+ } else if (t.equals(serverB)) {
+ leader = t;
+ leaderServiceDir = getServiceDirB();
+ leaderListener = serviceListenerB;
+ } else if (t.equals(serverC)) {
+ leader = t;
+ leaderServiceDir = getServiceDirC();
+ leaderListener = serviceListenerC;
+ } else if (t.equals(serverD)) {
+ leader = t;
+ leaderServiceDir = getServiceDirD();
+ leaderListener = serviceListenerD;
+ } else if (t.equals(serverE)) {
+ leader = t;
+ leaderServiceDir = getServiceDirE();
+ leaderListener = serviceListenerE;
+ } else {
+ if (serverA == null && serverB == null && serverC == null && serverD == null && serverE == null) {
+ /*
+ * There are no services running and nothing to shutdown. We
+ * probably resolved a stale proxy to the leader above.
+ */
+ return;
+ }
+ throw new IllegalStateException(
+ "Leader is none of A, B, or C: leader=" + t + ", A="
+ + serverA + ", B=" + serverB + ", C=" + serverC);
+ }
+ } else {
+ leader = null;
+ leaderServiceDir = null;
+ leaderListener = null;
+ }
+
+ if (leader == null || !leader.equals(serverA)) {
+ destroyA();
+ }
+
+ if (leader == null || !leader.equals(serverB)) {
+ destroyB();
+ }
+
+ if (leader == null || !leader.equals(serverC)) {
+ destroyC();
+ }
+
+ if (leader == null || !leader.equals(serverD)) {
+ destroyD();
+ }
+
+ if (leader == null || !leader.equals(serverE)) {
+ destroyE();
+ }
+
+ // Destroy leader last
+ if (leader != null) {
+ safeDestroy(leader, leaderServiceDir, leaderListener);
+
+ serverA = serverB = serverC = serverD = serverE = null;
+ serviceListenerA = serviceListenerC = serviceListenerB = serviceListenerD = serviceListenerE = null;
+ }
+
+ }
+
+ protected void destroyD() {
+ safeDestroy(serverD, getServiceDirD(), serviceListenerD);
+ serverD = null;
+ serviceListenerD = null;
+ }
+
+ protected void destroyE() {
+ safeDestroy(serverE, getServiceDirE(), serviceListenerE);
+ serverE = null;
+ serviceListenerE = null;
+ }
+
+}
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-02 13:03:59 UTC (rev 8025)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-02 13:12:56 UTC (rev 8026)
@@ -440,8 +440,17 @@
* Wait for the service to report that it is ready as a leader or
* follower.
*/
- haGlue.awaitHAReady(awaitQuorumTimeout, TimeUnit.MILLISECONDS);
+ return awaitNSSAndHAReady(haGlue, awaitQuorumTimeout, TimeUnit.MILLISECONDS);
+ }
+
+ protected HAStatusEnum awaitNSSAndHAReady(final HAGlue haGlue, long timout, TimeUnit unit)
+ throws Exception {
/*
+ * Wait for the service to report that it is ready as a leader or
+ * follower.
+ */
+ haGlue.awaitHAReady(timout, unit);
+ /*
* Wait for the NSS to report the status of the service (this verifies
* that the NSS interface is running).
*/
@@ -525,7 +534,9 @@
protected String getNanoSparqlServerURL(final HAGlue haGlue)
throws IOException {
- return "http://localhost:" + haGlue.getNSSPort();
+ final int port = haGlue.getNSSPort();
+
+ return "http://localhost:" + port;
}
@@ -541,7 +552,7 @@
final String sparqlEndpointURL = getNanoSparqlServerURL(haGlue)
+ "/sparql";
-
+
// Client for talking to the NSS.
final HttpClient httpClient = new DefaultHttpClient(ccm);
@@ -1248,4 +1259,35 @@
}
+ /**
+ * The effective name for this test as used to name the directories in which
+ * we store things.
+ *
+ * TODO If there are method name collisions across the different test
+ * classes then the test suite name can be added to this. Also, if there are
+ * file naming problems, then this value can be munged before it is
+ * returned.
+ */
+ private final String effectiveTestFileName = getClass().getSimpleName()
+ + "." + getName();
+
+ /**
+ * The directory that is the parent of each {@link HAJournalServer}'s
+ * individual service directory.
+ */
+ protected File getTestDir() {
+ return new File(TGT_PATH, getEffectiveTestFileName());
+ }
+
+ /**
+ * The effective name for this test as used to name the directories in which
+ * we store things.
+ */
+ protected String getEffectiveTestFileName() {
+
+ return effectiveTestFileName;
+
+ }
+
+
}
Added: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config (rev 0)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config 2014-04-02 13:12:56 UTC (rev 8026)
@@ -0,0 +1,288 @@
+import net.jini.jeri.BasicILFactory;
+import net.jini.jeri.BasicJeriExporter;
+import net.jini.jeri.tcp.TcpServerEndpoint;
+
+import net.jini.discovery.LookupDiscovery;
+import net.jini.core.discovery.LookupLocator;
+import net.jini.core.entry.Entry;
+import net.jini.lookup.entry.Name;
+import net.jini.lookup.entry.Comment;
+import net.jini.lookup.entry.Address;
+import net.jini.lookup.entry.Location;
+import net.jini.lookup.entry.ServiceInfo;
+import net.jini.core.lookup.ServiceTemplate;
+
+import java.io.File;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.UUID;
+
+import com.bigdata.util.NV;
+import com.bigdata.util.config.NicUtil;
+import com.bigdata.journal.Options;
+import com.bigdata.journal.BufferMode;
+import com.bigdata.journal.jini.ha.HAJournal;
+import com.bigdata.jini.lookup.entry.*;
+import com.bigdata.service.IBigdataClient;
+import com.bigdata.service.AbstractTransactionService;
+import com.bigdata.service.jini.*;
+import com.bigdata.service.jini.lookup.DataServiceFilter;
+import com.bigdata.service.jini.master.ServicesTemplate;
+import com.bigdata.jini.start.config.*;
+import com.bigdata.jini.util.ConfigMath;
+
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+
+// imports for various options.
+import com.bigdata.btree.IndexMetadata;
+import com.bigdata.btree.keys.KeyBuilder;
+import com.bigdata.rdf.sail.BigdataSail;
+import com.bigdata.rdf.spo.SPORelation;
+import com.bigdata.rdf.spo.SPOKeyOrder;
+import com.bigdata.rdf.lexicon.LexiconRelation;
+import com.bigdata.rdf.lexicon.LexiconKeyOrder;
+import com.bigdata.rawstore.Bytes;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeUnit.*;
+
+/*
+ * This is a sample configuration file for a highly available Journal. A
+ * version of this file must be available to each HAJournalServer in the
+ * pipeline.
+ */
+
+/*
+ * Globals.
+ */
+bigdata {
+
+ private static fedname = "benchmark";
+
+ // NanoSparqlServer (http) port.
+ private static nssPort = ConfigMath.add(8090,3);
+
+ // write replication pipeline port (listener).
+ private static haPort = ConfigMath.add(9090,3);
+
+ // The #of services in the write pipeline.
+ private static replicationFactor = 5;
+
+ // The logical service identifier shared by all members of the quorum.
+ private static logicalServiceId = System.getProperty("test.logicalServiceId","CI-HAJournal-1");
+
+ // The service directory.
+ // Note: Overridden by environment property when deployed.
+ private static serviceDir = new File(System.getProperty("test.serviceDir",ConfigMath.getAbsolutePath(new File(new File(fedname,logicalServiceId),"D"))));
+ //new File(new File(fedname,logicalServiceId),"D");
+
+ // journal data directory.
+ private static dataDir = serviceDir;
+
+ // one federation, multicast discovery.
+ //static private groups = LookupDiscovery.ALL_GROUPS;
+
+ // unicast discovery or multiple setups, MUST specify groups.
+ static private groups = new String[]{bigdata.fedname};
+
+ /**
...
[truncated message content] |
|
From: <mar...@us...> - 2014-04-03 09:48:22
|
Revision: 8032
http://sourceforge.net/p/bigdata/code/8032
Author: martyncutcher
Date: 2014-04-03 09:48:19 +0000 (Thu, 03 Apr 2014)
Log Message:
-----------
Commit to allow branch to be added to CI. Note that this includes a delay added to AbstractJournal.gatherPhase() to support HA1, which must be removed once the cause of its necessity is identified.
Modified Paths:
--------------
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/RWStrategy.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/WORMStrategy.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -310,7 +310,7 @@
/**
* When a record is used as a read cache then the readCount is
- * maintained as a metric on its access. �This could be used to
+ * maintained as a metric on its access. This could be used to
* determine eviction/compaction.
* <p>
* Note: volatile to guarantee visibility of updates. Might do better
@@ -509,7 +509,8 @@
* @param isHighlyAvailable
* when <code>true</code> the whole record checksum is maintained
* for use when replicating the write cache along the write
- * pipeline.
+ * pipeline. This needs to be <code>true</code> for HA1 as well
+ * since we need to write the HALog.
* @param bufferHasData
* when <code>true</code> the caller asserts that the buffer has
* data (from a replicated write), in which case the position
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -1151,6 +1151,7 @@
done = WriteCache.transferTo(cache/* src */,
curCompactingCache/* dst */, serviceMap, 0/*threshold*/);
if (done) {
+ // Everything was compacted. Send just the address metadata (empty cache block).
sendAddressMetadata(cache);
if (log.isDebugEnabled())
@@ -1164,7 +1165,7 @@
*/
if (flush) {
/*
- * Send out the full cache block.
+ * Send out the full cache block. FIXME Why are we not calling sendAddressMetadata() here?
*/
writeCacheBlock(curCompactingCache);
addClean(curCompactingCache, true/* addFirst */);
@@ -1231,7 +1232,7 @@
* been allocated on the leader in the same order in which the leader
* made those allocations. This information is used to infer the order
* in which the allocators for the different allocation slot sizes are
- * created. This method will synchronous send those address notices and
+ * created. This method will synchronously send those address notices and
* and also makes sure that the followers see the recycled addresses
* records so they can keep both their allocators and the actual
* allocations synchronized with the leader.
@@ -1249,8 +1250,9 @@
throws IllegalStateException, InterruptedException,
ExecutionException, IOException {
- if (quorum == null || !quorum.isHighlyAvailable()
- || !quorum.getClient().isLeader(quorumToken)) {
+// if (quorum == null || !quorum.isHighlyAvailable()
+// || !quorum.getClient().isLeader(quorumToken)) {
+ if (quorum == null) {
return;
}
@@ -1354,7 +1356,7 @@
* unit tests need to be updated to specify [isHighlyAvailable] for
* ALL quorum based test runs.
*/
- final boolean isHA = quorum != null && quorum.isHighlyAvailable();
+ final boolean isHA = quorum != null;
// IFF HA and this is the quorum leader.
final boolean isHALeader = isHA
@@ -1441,10 +1443,12 @@
quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate());
// ASYNC MSG RMI + NIO XFER.
- remoteWriteFuture = quorumMember.replicate(null/* req */, pkg.getMessage(),
- pkg.getData().duplicate());
-
- counters.get().nsend++;
+ if (quorum.replicationFactor() > 1) {
+ remoteWriteFuture = quorumMember.replicate(null/* req */, pkg.getMessage(),
+ pkg.getData().duplicate());
+
+ counters.get().nsend++;
+ }
/*
* The quorum leader logs the write cache block here. For the
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -2473,18 +2473,18 @@
}
- /**
- * Return <code>true</code> if the journal is configured for high
- * availability.
- *
- * @see QuorumManager#isHighlyAvailable()
- */
- public boolean isHighlyAvailable() {
+// /**
+// * Return <code>true</code> if the journal is configured for high
+// * availability.
+// *
+// * @see Quorum#isHighlyAvailable()
+// */
+// public boolean isHighlyAvailable() {
+//
+// return quorum == null ? false : quorum.isHighlyAvailable();
+//
+// }
- return quorum == null ? false : quorum.isHighlyAvailable();
-
- }
-
/**
* {@inheritDoc}
* <p>
@@ -3428,8 +3428,16 @@
if (quorum == null)
return;
-// if (!quorum.isHighlyAvailable())
-// return;
+ if (!quorum.isHighlyAvailable()) {
+ // FIXME: Find the reason why this delay is needed and remove it!
+ //
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ return;
+ }
/**
* CRITICAL SECTION. We need obtain a distributed consensus for the
@@ -3542,6 +3550,19 @@
// reload the commit record from the new root block.
store._commitRecord = store._getCommitRecord();
+ if (quorum != null) {
+ /*
+ * Write the root block on the HALog file, closing out that
+ * file.
+ */
+ final QuorumService<HAGlue> localService = quorum.getClient();
+ try {
+ localService.logRootBlock(newRootBlock);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
if (txLog.isInfoEnabled())
txLog.info("COMMIT: commitTime=" + commitTime);
@@ -3792,7 +3813,7 @@
if (log.isInfoEnabled())
log.info("commitTime=" + commitTime);
- final CommitState cs = new CommitState(this, commitTime);
+ final CommitState cs = new CommitState(this, commitTime);
/*
* Flush application data, decide whether or not the store is dirty,
@@ -3808,6 +3829,7 @@
}
// Do GATHER (iff HA).
+
cs.gatherPhase();
/*
@@ -3846,12 +3868,12 @@
// Prepare the new root block.
cs.newRootBlock();
- if (quorum == null) {
+ if (quorum == null || quorum.replicationFactor() == 1) {
// Non-HA mode.
cs.commitSimple();
- } else {
+ } else {
// HA mode commit (2-phase commit).
cs.commitHA();
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/RWStrategy.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -146,6 +146,7 @@
}
+ @Override
public ByteBuffer read(final long addr) {
try {
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/WORMStrategy.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -272,7 +272,7 @@
* which use this flag to conditionally track the checksum of the entire
* write cache buffer).
*/
- private final boolean isHighlyAvailable;
+ private final boolean isQuorumUsed;
/**
* The {@link UUID} which identifies the journal (this is the same for each
@@ -970,11 +970,11 @@
com.bigdata.journal.Options.HALOG_COMPRESSOR,
com.bigdata.journal.Options.DEFAULT_HALOG_COMPRESSOR);
- isHighlyAvailable = quorum != null && quorum.isHighlyAvailable();
+ isQuorumUsed = quorum != null; // && quorum.isHighlyAvailable();
final boolean useWriteCacheService = fileMetadata.writeCacheEnabled
&& !fileMetadata.readOnly && fileMetadata.closeTime == 0L
- || isHighlyAvailable;
+ || isQuorumUsed;
if (useWriteCacheService) {
/*
@@ -1049,7 +1049,7 @@
final long fileExtent)
throws InterruptedException {
- super(baseOffset, buf, useChecksum, isHighlyAvailable,
+ super(baseOffset, buf, useChecksum, isQuorumUsed,
bufferHasData, opener, fileExtent);
}
@@ -1379,6 +1379,7 @@
* to get the data from another node based on past experience for that
* record.
*/
+ @Override
public ByteBuffer read(final long addr) {
try {
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -688,7 +688,7 @@
throws InterruptedException {
super(buf, useChecksum, m_quorum != null
- && m_quorum.isHighlyAvailable(), bufferHasData, opener,
+ /*&& m_quorum.isHighlyAvailable()*/, bufferHasData, opener,
fileExtent,
m_bufferedWrite);
@@ -1083,7 +1083,7 @@
final boolean highlyAvailable = m_quorum != null
&& m_quorum.isHighlyAvailable();
- final boolean prefixWrites = highlyAvailable;
+ final boolean prefixWrites = m_quorum != null; // highlyAvailable
return new RWWriteCacheService(m_writeCacheBufferCount,
m_minCleanListSize, m_readCacheBufferCount, prefixWrites, m_compactionThreshold, m_hotCacheSize, m_hotCacheThreshold,
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -80,8 +80,9 @@
final long fileExtent)
throws InterruptedException {
- final boolean highlyAvailable = getQuorum() != null
- && getQuorum().isHighlyAvailable();
+// final boolean highlyAvailable = getQuorum() != null
+// && getQuorum().isHighlyAvailable();
+ final boolean highlyAvailable = getQuorum() != null;
return new FileChannelScatteredWriteCache(buf, true/* useChecksum */,
highlyAvailable,
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -184,7 +184,7 @@
// Verify journal can be dumped without error.
dumpJournal(jnl);
-
+
/*
* Now roll that journal forward using the HALog directory.
*/
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -56,7 +56,7 @@
"com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)",
"com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.DefaultSnapshotPolicy("+neverRun+",0)",
// "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()",
- "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true",
+ // "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true",
"com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=1",
};
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -36,8 +36,11 @@
import org.openrdf.model.impl.URIImpl;
import org.openrdf.model.vocabulary.RDF;
+import com.bigdata.ha.HAGlue;
+import com.bigdata.ha.QuorumService;
import com.bigdata.journal.AbstractJournal;
import com.bigdata.journal.IIndexManager;
+import com.bigdata.quorum.Quorum;
import com.bigdata.rdf.axioms.Axioms;
import com.bigdata.rdf.axioms.NoAxioms;
import com.bigdata.rdf.axioms.OwlAxioms;
@@ -641,7 +644,10 @@
final AbstractJournal jnl = (AbstractJournal) indexManager;
- if (jnl.isHighlyAvailable()) {
+ final Quorum<HAGlue, QuorumService<HAGlue>> quorum = jnl
+ .getQuorum();
+
+ if (quorum != null && quorum.isHighlyAvailable()) {
g.add(aService, SD.feature, HighlyAvailable);
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-04-03 00:41:27 UTC (rev 8031)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-04-03 09:48:19 UTC (rev 8032)
@@ -55,10 +55,13 @@
import com.bigdata.bop.engine.QueryLog;
import com.bigdata.bop.fed.QueryEngineFactory;
import com.bigdata.counters.CounterSet;
+import com.bigdata.ha.HAGlue;
+import com.bigdata.ha.QuorumService;
import com.bigdata.journal.AbstractJournal;
import com.bigdata.journal.DumpJournal;
import com.bigdata.journal.IIndexManager;
import com.bigdata.journal.Journal;
+import com.bigdata.quorum.Quorum;
import com.bigdata.rdf.sail.sparql.ast.SimpleNode;
import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask;
import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery;
@@ -497,13 +500,18 @@
// final boolean showQuorum = req.getParameter(SHOW_QUORUM) != null;
- if (getIndexManager() instanceof AbstractJournal
- && ((AbstractJournal) getIndexManager())
- .isHighlyAvailable()) {
+ if (getIndexManager() instanceof AbstractJournal) {
- new HAStatusServletUtil(getIndexManager()).
- doGet(req, resp, current);
+ final Quorum<HAGlue, QuorumService<HAGlue>> quorum = ((AbstractJournal) getIndexManager())
+ .getQuorum();
+ if (quorum != null && quorum.isHighlyAvailable()) {
+
+ new HAStatusServletUtil(getIndexManager()).doGet(req, resp,
+ current);
+
+ }
+
}
current.node("br", "Accepted query count="
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-04-06 15:09:20
|
Revision: 8061
http://sourceforge.net/p/bigdata/code/8061
Author: thompsonbry
Date: 2014-04-06 15:09:14 +0000 (Sun, 06 Apr 2014)
Log Message:
-----------
Reconciled my version and Martyns with respect to the HA1/HA5 edits.
See #721 (HA1)
See #722 (HA5)
Modified Paths:
--------------
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-05 15:59:33 UTC (rev 8060)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-06 15:09:14 UTC (rev 8061)
@@ -3555,13 +3555,17 @@
* file.
*/
final QuorumService<HAGlue> localService = quorum.getClient();
- try {
- localService.logRootBlock(newRootBlock);
- } catch (IOException e) {
- throw new RuntimeException(e);
+ if (localService != null) {
+ // Quorum service not asynchronously closed.
+ try {
+ // Write the closing root block on the HALog file.
+ localService.logRootBlock(newRootBlock);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
}
}
-
+
if (txLog.isInfoEnabled())
txLog.info("COMMIT: commitTime=" + commitTime);
@@ -3812,7 +3816,7 @@
if (log.isInfoEnabled())
log.info("commitTime=" + commitTime);
- final CommitState cs = new CommitState(this, commitTime);
+ final CommitState cs = new CommitState(this, commitTime);
/*
* Flush application data, decide whether or not the store is dirty,
@@ -3828,7 +3832,6 @@
}
// Do GATHER (iff HA).
-
cs.gatherPhase();
/*
@@ -3872,7 +3875,7 @@
// Non-HA mode.
cs.commitSimple();
- } else {
+ } else {
// HA mode commit (2-phase commit).
cs.commitHA();
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-05 15:59:33 UTC (rev 8060)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-06 15:09:14 UTC (rev 8061)
@@ -1080,8 +1080,8 @@
private RWWriteCacheService newWriteCacheService() {
try {
- final boolean highlyAvailable = m_quorum != null
- && m_quorum.isHighlyAvailable();
+// final boolean highlyAvailable = m_quorum != null
+// && m_quorum.isHighlyAvailable();
final boolean prefixWrites = m_quorum != null; // highlyAvailable
@@ -1089,7 +1089,8 @@
m_minCleanListSize, m_readCacheBufferCount, prefixWrites, m_compactionThreshold, m_hotCacheSize, m_hotCacheThreshold,
convertAddr(m_fileSize), m_reopener, m_quorum, this) {
-
+
+ @Override
@SuppressWarnings("unchecked")
public WriteCache newWriteCache(final IBufferAccess buf,
final boolean useChecksum,
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-04-05 15:59:33 UTC (rev 8060)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-04-06 15:09:14 UTC (rev 8061)
@@ -517,10 +517,11 @@
final long commitCounter = SnapshotManager
.parseCommitCounterFile(journalFile.getName());
+ // TODO Note: Accept version from main development branch when merging versions.
// temporary file in the same directory as the snapshot.
- final File out = File.createTempFile("" + commitCounter + "-",
- Journal.Options.JNL, journalFile.getAbsoluteFile()
- .getParentFile());
+ final File out = File.createTempFile("HARestore-TMP"
+ + commitCounter + "-", Journal.Options.JNL, journalFile
+ .getAbsoluteFile().getParentFile());
System.out.println("Decompressing " + in + " to " + out);
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-04-05 15:59:33 UTC (rev 8060)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-04-06 15:09:14 UTC (rev 8061)
@@ -164,12 +164,29 @@
+ "KB/IsolatableIndices");
/**
- * A highly available deployment.
+ * A highly available deployment - this feature refers to the presence of
+ * the {@link HAGlue} interface, the capability for online backups, and the
+ * existence of a target {@link #ReplicationFactor}. You must consult the
+ * target {@link #ReplicationFactor} in order to determine whether the
+ * database is in principle capable of tolerating one or more failures and
+ * the actual #of running joined instances to determine whether the database
+ * can withstand a failure.
*/
static public final URI HighlyAvailable = new URIImpl(BDFNS
+ "HighlyAvailable");
/**
+ * The value of this feature is the target replication factor for the
+ * database expressed as an <code>xsd:int</code>. If this is ONE (1), then
+ * the database is setup with a quorum and has the capability for online
+ * backup, but it is not replicated. TWO (2) indicates mirroring, but is not
+ * highly available. THREE (3) is the minimum configuration that can
+ * withstand a failure.
+ */
+ static public final URI ReplicationFactor = new URIImpl(BDFNS
+ + "replicationCount");
+
+ /**
* An {@link IBigdataFederation}.
*/
static public final URI ScaleOut = new URIImpl(BDFNS
@@ -647,8 +664,13 @@
final Quorum<HAGlue, QuorumService<HAGlue>> quorum = jnl
.getQuorum();
- if (quorum != null && quorum.isHighlyAvailable()) {
+ if (quorum != null) {
+ final int k = quorum.replicationFactor();
+
+ g.add(aService, SD.ReplicationFactor, tripleStore
+ .getValueFactory().createLiteral(k));
+
g.add(aService, SD.feature, HighlyAvailable);
}
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-04-05 15:59:33 UTC (rev 8060)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-04-06 15:09:14 UTC (rev 8061)
@@ -505,7 +505,7 @@
final Quorum<HAGlue, QuorumService<HAGlue>> quorum = ((AbstractJournal) getIndexManager())
.getQuorum();
- if (quorum != null && quorum.isHighlyAvailable()) {
+ if (quorum != null) {//&& quorum.isHighlyAvailable()) {
new HAStatusServletUtil(getIndexManager()).doGet(req, resp,
current);
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <mar...@us...> - 2014-04-09 07:44:35
|
Revision: 8090
http://sourceforge.net/p/bigdata/code/8090
Author: martyncutcher
Date: 2014-04-09 07:44:27 +0000 (Wed, 09 Apr 2014)
Log Message:
-----------
For ticket #721: fix to BufferedWrite to ensure buffers are zero padded to the slot size when eliding contiguous writes. This caused a potential problem
with binary equivalence for snapshots.
Modified Paths:
--------------
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-08 22:24:37 UTC (rev 8089)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-09 07:44:27 UTC (rev 8090)
@@ -137,6 +137,9 @@
}
+ // Used to zero pad slots in buffered writes
+ final byte[] s_zeros = new byte[256];
+
/**
* Buffer a write.
*
@@ -188,6 +191,19 @@
}
// copy the caller's record into the buffer.
m_data.put(data);
+
+ // if data_len < slot_len then clear remainder of buffer
+ int padding = slot_len - data_len;
+ while (padding > 0) {
+ if (padding > s_zeros.length) {
+ m_data.put(s_zeros);
+ padding -= s_zeros.length;
+ } else {
+ m_data.put(s_zeros, 0, padding);
+ break;
+ }
+ }
+
// update the file offset by the size of the allocation slot
m_endAddr += slot_len;
// update the buffer position by the size of the allocation slot.
@@ -250,8 +266,9 @@
final ByteBuffer m_data = tmp.buffer();
// reset the buffer state.
- m_data.position(0);
- m_data.limit(m_data.capacity());
+ //m_data.position(0);
+ //m_data.limit(m_data.capacity());
+ m_data.clear();
m_startAddr = -1;
m_endAddr = 0;
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2014-04-08 22:24:37 UTC (rev 8089)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2014-04-09 07:44:27 UTC (rev 8090)
@@ -839,6 +839,7 @@
m_statsBucket.allocate(size);
}
+
return value;
} else {
StringBuilder sb = new StringBuilder();
@@ -1300,4 +1301,33 @@
return count;
}
+ /**
+ * Determines if the provided physical address is within an allocated slot
+ * @param addr
+ * @return
+ */
+ public boolean verifyAllocatedAddress(long addr) {
+ if (log.isTraceEnabled())
+ log.trace("Checking Allocator " + m_index + ", size: " + m_size);
+
+ final Iterator<AllocBlock> blocks = m_allocBlocks.iterator();
+ final long range = m_size * m_bitSize * 32;
+ while (blocks.hasNext()) {
+ final int startAddr = blocks.next().m_addr;
+ if (startAddr != 0) {
+ final long start = RWStore.convertAddr(startAddr);
+ final long end = start + range;
+
+ if (log.isTraceEnabled())
+ log.trace("Checking " + addr + " between " + start + " - " + end);
+
+ if (addr >= start && addr < end)
+ return true;
+ } else {
+ break;
+ }
+ }
+ return false;
+ }
+
}
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-08 22:24:37 UTC (rev 8089)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-09 07:44:27 UTC (rev 8090)
@@ -6963,7 +6963,7 @@
if (log.isDebugEnabled())
log.debug("writeRaw: " + offset);
-
+
// Guard IO against concurrent file extension.
final Lock lock = m_extensionLock.readLock();
@@ -7068,6 +7068,22 @@
}
}
+ /**
+ * Can be used to determine if an address is within an allocated slot.
+ *
+ * @param addr
+ * @return whether addr is within slot allocated area
+ */
+ public boolean verifyAllocatedAddress(final long addr) {
+ for (int index = 0; index < m_allocs.size(); index++) {
+ final FixedAllocator xfa = m_allocs.get(index);
+ if (xfa.verifyAllocatedAddress(addr))
+ return true;
+ }
+
+ return false;
+ }
+
public StoreState getStoreState() {
final RWStoreState ret = new RWStoreState(this);
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-08 22:24:37 UTC (rev 8089)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-09 07:44:27 UTC (rev 8090)
@@ -6,6 +6,8 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
+import net.jini.config.Configuration;
+
import com.bigdata.ha.HAGlue;
import com.bigdata.ha.HAStatusEnum;
import com.bigdata.ha.msg.HARootBlockRequest;
@@ -13,11 +15,8 @@
import com.bigdata.ha.msg.IHASnapshotResponse;
import com.bigdata.journal.IRootBlockView;
import com.bigdata.journal.Journal;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.LargeLoadTask;
import com.bigdata.rdf.sail.webapp.client.RemoteRepository;
-import net.jini.config.Configuration;
-
public class TestHA1SnapshotPolicy extends AbstractHA3BackupTestCase {
public TestHA1SnapshotPolicy() {
@@ -438,8 +437,8 @@
*/
public void testA_snapshot_multipleTx_restore_validate() throws Exception {
- final int N1 = 7; // #of transactions to run before the snapshot.
- final int N2 = 8; // #of transactions to run after the snapshot.
+ final int N1 = 7; //7; // #of transactions to run before the snapshot.
+ final int N2 = 8; //8; // #of transactions to run after the snapshot.
// Start service.
final HAGlue serverA = startA();
@@ -459,13 +458,13 @@
// Now run N transactions.
for (int i = 0; i < N1; i++) {
+
+ simpleTransaction();
- simpleTransaction();
-
}
+
+ final long commitCounterN1 = N1 + 1;
- final long commitCounterN1 = N1 + 1;
-
awaitCommitCounter(commitCounterN1, serverA);
/*
@@ -478,7 +477,7 @@
// Snapshot directory is empty.
assertEquals(1, recursiveCount(getSnapshotDirA(),SnapshotManager.SNAPSHOT_FILTER));
-
+
// request snapshot on A.
final Future<IHASnapshotResponse> ft = serverA
.takeSnapshot(new HASnapshotRequest(0/* percentLogSize */));
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-04-15 13:01:37
|
Revision: 8121
http://sourceforge.net/p/bigdata/code/8121
Author: thompsonbry
Date: 2014-04-15 13:01:24 +0000 (Tue, 15 Apr 2014)
Log Message:
-----------
Caught up the HA1/HA5 branch with changes in the main development branch prior to bringing back the HA1/HA5 branch to the main development branch.
See #722 (HA1)
See #723 (HA5)
Modified Paths:
--------------
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/FullTextIndex.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/ReadIndexTask.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/util/CSVReader.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/cache/StressTestGlobalLRU.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithRedirect.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithSplits.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchOptimizer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchServiceFactory.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestUnions.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
branches/BIGDATA_MGC_HA1_HA5/build.xml
branches/BIGDATA_MGC_HA1_HA5/src/resources/HAJournal/HAJournal.config
branches/BIGDATA_MGC_HA1_HA5/src/resources/bin/startHAServices
branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/init.d/bigdataHA
Added Paths:
-----------
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.rq
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.srx
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.trig
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888b.rq
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.rq
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.srx
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.ttl
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.rq
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.srx
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.ttl
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874b.rq
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/831.rq
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/831.ttl
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/874.ttl
branches/BIGDATA_MGC_HA1_HA5/src/resources/bin/HARestore
branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/
branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdata/
branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdataHA
Removed Paths:
-------------
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb
branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/bigdata/
branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdata/
branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdataHA
Property Changed:
----------------
branches/BIGDATA_MGC_HA1_HA5/
branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/bop/joinGraph/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/bop/util/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/jsr166/
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/util/httpd/
branches/BIGDATA_MGC_HA1_HA5/bigdata-compatibility/
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/attr/
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/disco/
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/util/config/
branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/
branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/btc/
branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/btc/src/resources/
branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/lubm/
branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/uniprot/
branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/uniprot/src/
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/changesets/
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/error/
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/internal/
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/relation/
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/util/
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/samples/
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/internal/
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/relation/
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/
branches/BIGDATA_MGC_HA1_HA5/dsi-utils/
branches/BIGDATA_MGC_HA1_HA5/dsi-utils/LEGAL/
branches/BIGDATA_MGC_HA1_HA5/dsi-utils/lib/
branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/
branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/java/
branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/java/it/
branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/java/it/unimi/
branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/test/
branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/test/it/unimi/
branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/test/it/unimi/dsi/
branches/BIGDATA_MGC_HA1_HA5/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/
branches/BIGDATA_MGC_HA1_HA5/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/
branches/BIGDATA_MGC_HA1_HA5/osgi/
branches/BIGDATA_MGC_HA1_HA5/src/resources/bin/config/
Index: branches/BIGDATA_MGC_HA1_HA5
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5 2014-04-15 12:51:02 UTC (rev 8120)
+++ branches/BIGDATA_MGC_HA1_HA5 2014-04-15 13:01:24 UTC (rev 8121)
Property changes on: branches/BIGDATA_MGC_HA1_HA5
___________________________________________________________________
Modified: svn:mergeinfo
## -1,5 +1,6 ##
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785
/branches/BIGDATA_RELEASE_1_2_0:6766-7380
+/branches/BIGDATA_RELEASE_1_3_0:8025-8120
/branches/BTREE_BUFFER_BRANCH:2004-2045
/branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782
/branches/INT64_BRANCH:4486-4522
\ No newline at end of property
Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty 2014-04-15 12:51:02 UTC (rev 8120)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty 2014-04-15 13:01:24 UTC (rev 8121)
Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty
___________________________________________________________________
Modified: svn:mergeinfo
## -1,5 +1,6 ##
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380
+/branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:8025-8120
/branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522
/branches/MGC_1_3_0/bigdata/lib/jetty:7609-7752
/branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
\ No newline at end of property
Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-15 12:51:02 UTC (rev 8120)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-15 13:01:24 UTC (rev 8121)
Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate
___________________________________________________________________
Modified: svn:mergeinfo
## -1,5 +1,6 ##
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380
+/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:8025-8120
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522
/branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7609-7752
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
\ No newline at end of property
Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-15 12:51:02 UTC (rev 8120)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-15 13:01:24 UTC (rev 8121)
Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph
___________________________________________________________________
Modified: svn:mergeinfo
## -1,5 +1,6 ##
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380
+/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:8025-8120
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522
/branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:7609-7752
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
\ No newline at end of property
Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util 2014-04-15 12:51:02 UTC (rev 8120)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util 2014-04-15 13:01:24 UTC (rev 8121)
Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util
___________________________________________________________________
Modified: svn:mergeinfo
## -1,5 +1,6 ##
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380
+/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util:8025-8120
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522
/branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/util:7609-7752
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
\ No newline at end of property
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java 2014-04-15 12:51:02 UTC (rev 8120)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java 2014-04-15 13:01:24 UTC (rev 8121)
@@ -84,11 +84,11 @@
*/
public String readLine() throws IOException, InterruptedException {
- final Thread t = Thread.currentThread();
+// final Thread t = Thread.currentThread();
while(getActiveProcess().isAlive()) {
- if(t.isInterrupted()) {
+ if(Thread.interrupted()) {
throw new InterruptedException();
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java 2014-04-15 12:51:02 UTC (rev 8120)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java 2014-04-15 13:01:24 UTC (rev 8121)
@@ -31,6 +31,7 @@
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.LineNumberReader;
+import java.nio.channels.ClosedByInterruptException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Arrays;
@@ -39,6 +40,7 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.CancellationException;
import org.apache.log4j.Logger;
@@ -52,6 +54,7 @@
import com.bigdata.counters.IRequiredHostCounters;
import com.bigdata.util.CSVReader;
import com.bigdata.util.CSVReader.Header;
+import com.bigdata.util.InnerCause;
/**
* Collects per-host performance counters on a Windows platform using
@@ -68,19 +71,19 @@
*/
public class TypeperfCollector extends AbstractProcessCollector {
- static protected final Logger log = Logger.getLogger(TypeperfCollector.class);
+ static private final Logger log = Logger.getLogger(TypeperfCollector.class);
- /**
- * True iff the {@link #log} level is INFO or less.
- */
- final protected static boolean INFO = log.isInfoEnabled();
+// /**
+// * True iff the {@link #log} level is INFO or less.
+// */
+// final protected static boolean INFO = log.isInfoEnabled();
+//
+// /**
+// * True iff the {@link #log} level is DEBUG or less.
+// */
+// final protected static boolean DEBUG = log.isDebugEnabled();
/**
- * True iff the {@link #log} level is DEBUG or less.
- */
- final protected static boolean DEBUG = log.isDebugEnabled();
-
- /**
* Updated each time a new row of data is read from the process and reported
* as the last modified time for counters based on that process and
* defaulted to the time that we begin to collect performance data.
@@ -175,6 +178,7 @@
}
+ @Override
public Double getValue() {
final Double value = (Double) vals.get(path);
@@ -189,6 +193,7 @@
}
+ @Override
public long lastModified() {
return lastModified;
@@ -199,7 +204,8 @@
* @throws UnsupportedOperationException
* always.
*/
- public void setValue(Double value, long timestamp) {
+ @Override
+ public void setValue(final Double value, final long timestamp) {
throw new UnsupportedOperationException();
@@ -225,6 +231,7 @@
*
* @throws IOException
*/
+ @Override
public List<String> getCommand() {
// make sure that our counters have been declared.
@@ -243,7 +250,7 @@
// counter names need to be double quoted for the command line.
command.add("\"" + decl.getCounterNameForWindows() + "\"");
- if(INFO) log.info("Will collect: \""
+ if(log.isInfoEnabled()) log.info("Will collect: \""
+ decl.getCounterNameForWindows() + "\" as "
+ decl.getPath());
@@ -255,6 +262,7 @@
}
+ @Override
public AbstractProcessReader getProcessReader() {
return new ProcessReader();
@@ -290,9 +298,10 @@
}
+ @Override
public void run() {
- if(INFO)
+ if(log.isInfoEnabled())
log.info("");
try {
@@ -300,27 +309,34 @@
// run
read();
- } catch (InterruptedException e) {
+ } catch (Exception e) {
- // Note: This is a normal exit.
- if(INFO)
- log.info("Interrupted - will terminate");
+ if (InnerCause.isInnerCause(e, InterruptedException.class)||
+ InnerCause.isInnerCause(e, ClosedByInterruptException.class)||
+ InnerCause.isInnerCause(e, CancellationException.class)
+ ) {
- } catch (Exception e) {
+ // Note: This is a normal exit.
+ if (log.isInfoEnabled())
+ log.info("Interrupted - will terminate");
- // Unexpected error.
- log.fatal(e.getMessage(), e);
+ } else {
+ // Unexpected error.
+ log.fatal(e.getMessage(), e);
+
+ }
+
}
- if(INFO)
+ if(log.isInfoEnabled())
log.info("Terminated");
}
private void read() throws Exception {
- if(INFO)
+ if(log.isInfoEnabled())
log.info("");
long nsamples = 0;
@@ -345,33 +361,34 @@
*/
csvReader.setTailDelayMillis(100/* ms */);
- try {
+// try {
- // read headers from the file.
- csvReader.readHeaders();
+ // read headers from the file.
+ csvReader.readHeaders();
- } catch (IOException ex) {
+// } catch (IOException ex) {
+//
+// /*
+// * Note: An IOException thrown out here often indicates an
+// * asynchronous close of of the reader. A common and benign
+// * cause of that is closing the input stream because the service
+// * is shutting down.
+// */
+//
+// if (!Thread.interrupted())
+// throw ex;
+//
+// throw new InterruptedException();
+//
+// }
- /*
- * Note: An IOException thrown out here often indicates an
- * asynchronous close of of the reader. A common and benign
- * cause of that is closing the input stream because the service
- * is shutting down.
- */
-
- if (!Thread.currentThread().isInterrupted())
- throw ex;
-
- throw new InterruptedException();
-
- }
-
/*
* replace the first header definition so that we get clean
* timestamps.
*/
csvReader.setHeader(0, new Header("Timestamp") {
- public Object parseValue(String text) {
+ @Override
+ public Object parseValue(final String text) {
try {
return f.parse(text);
@@ -390,7 +407,7 @@
*/
{
- if(INFO)
+ if(log.isInfoEnabled())
log.info("setting up headers.");
int i = 1;
@@ -400,7 +417,7 @@
final String path = decl.getPath();
// String path = hostPathPrefix + decl.getPath();
- if (INFO)
+ if (log.isInfoEnabled())
log.info("setHeader[i=" + i + "]=" + path);
csvReader.setHeader(i++, new Header(path));
@@ -409,13 +426,20 @@
}
- if(INFO)
+ if(log.isInfoEnabled())
log.info("starting row reads");
- final Thread t = Thread.currentThread();
+// final Thread t = Thread.currentThread();
- while (!t.isInterrupted() && csvReader.hasNext()) {
+ while (true) {
+ if (Thread.interrupted())
+ throw new InterruptedException();
+
+ if (!csvReader.hasNext()) {
+ ...
[truncated message content] |
|
From: <tho...@us...> - 2014-04-15 14:16:19
|
Revision: 8122
http://sourceforge.net/p/bigdata/code/8122
Author: thompsonbry
Date: 2014-04-15 14:16:12 +0000 (Tue, 15 Apr 2014)
Log Message:
-----------
Reconciled changes in the main development branch into the HA1/HA5 branch in preparation for merge back to the main development branch. See #722 and #723.
Modified Paths:
--------------
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config
branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java
Removed Paths:
-------------
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config
branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -142,7 +142,7 @@
*
* @see <a href="http://trac.bigdata.com/ticket/721#comment:10"> HA1 </a>
*/
- private final byte[] s_zeros = new byte[256];
+ static private final byte[] s_zeros = new byte[256];
/**
* Buffer a write.
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -310,7 +310,7 @@
/**
* When a record is used as a read cache then the readCount is
- * maintained as a metric on its access. ???This could be used to
+ * maintained as a metric on its access. This could be used to
* determine eviction/compaction.
* <p>
* Note: volatile to guarantee visibility of updates. Might do better
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -1165,7 +1165,7 @@
*/
if (flush) {
/*
- * Send out the full cache block. FIXME Why are we not calling sendAddressMetadata() here?
+ * Send out the full cache block.
*/
writeCacheBlock(curCompactingCache);
addClean(curCompactingCache, true/* addFirst */);
@@ -1245,6 +1245,8 @@
* @throws InterruptedException
* @throws ExecutionException
* @throws IOException
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a>
*/
private void sendAddressMetadata(final WriteCache cache)
throws IllegalStateException, InterruptedException,
@@ -1345,19 +1347,14 @@
private void writeCacheBlock(final WriteCache cache)
throws InterruptedException, ExecutionException, IOException {
-// /*
-// * IFF HA
-// *
-// * TODO isHA should be true even if the quorum is not highly
-// * available since there still could be other services in the write
-// * pipeline (e.g., replication to an offline HAJournalServer prior
-// * to changing over into an HA3 quorum or off-site replication). The
-// * unit tests need to be updated to specify [isHighlyAvailable] for
-// * ALL quorum based test runs.
-// */
-// final boolean isHA = quorum != null && quorum.isHighlyAvailable();
-
- // IFF HA and this is the quorum leader.
+ /**
+ * IFF HA and this is the quorum leader.
+ *
+ * Note: This is true for HA1 as well. The code path enabled by this
+ * is responsible for writing the HALog files.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a>
+ */
final boolean isHALeader = quorum != null
&& quorum.getClient().isLeader(quorumToken);
@@ -1441,6 +1438,12 @@
*/
quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate());
+ /*
+ * TODO Do we want to always support the replication code path
+ * when a quorum exists (that is, also for HA1) in case there
+ * are pipeline listeners that are not HAJournalServer
+ * instances? E.g., for offsite replication?
+ */
if (quorum.replicationFactor() > 1) {
// ASYNC MSG RMI + NIO XFER.
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -3429,13 +3429,8 @@
return;
if (!quorum.isHighlyAvailable()) {
- // FIXME: Find the reason why this delay is needed to pass HA1 snapshot tests
-// try {
-// Thread.sleep(1000);
-// } catch (InterruptedException e) {
-// e.printStackTrace();
-// }
- return;
+ // Gather and 2-phase commit are not used in HA1.
+ return;
}
/**
@@ -3550,9 +3545,11 @@
store._commitRecord = store._getCommitRecord();
if (quorum != null) {
- /*
+ /**
* Write the root block on the HALog file, closing out that
* file.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a>
*/
final QuorumService<HAGlue> localService = quorum.getClient();
if (localService != null) {
@@ -3872,7 +3869,7 @@
if (quorum == null || quorum.replicationFactor() == 1) {
- // Non-HA mode.
+ // Non-HA mode (including HA1).
cs.commitSimple();
} else {
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -521,7 +521,7 @@
final int nbuffers = 1;
final boolean useChecksums = false;
- final boolean isHighlyAvailable = true; // for HA1! false;
+ final boolean isHighlyAvailable = true;
// No write pipeline.
final int k = 1;
@@ -619,7 +619,7 @@
final int nbuffers = 2;
final boolean useChecksums = false;
- final boolean isHighlyAvailable = true; // for HA1! false;
+ final boolean isHighlyAvailable = true;
// No write pipeline.
final int k = 1;
@@ -672,7 +672,7 @@
*/
final double largeRecordRate = 0d;
final boolean useChecksums = false;
- final boolean isHighlyAvailable = true; // for HA1! false;
+ final boolean isHighlyAvailable = true;
// No write pipeline.
final int k = 1;
@@ -717,7 +717,7 @@
final int nbuffers = 6;
final boolean useChecksums = true;
- final boolean isHighlyAvailable = true; // for HA1! false;
+ final boolean isHighlyAvailable = true;
// No write pipeline.
final int k = 1;
@@ -770,7 +770,7 @@
*/
final double largeRecordRate = 0d;
final boolean useChecksums = true;
- final boolean isHighlyAvailable = true; // for HA1! false;
+ final boolean isHighlyAvailable = true;
// No write pipeline.
final int k = 1;
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -4574,7 +4574,7 @@
// }
- log.warn("Starting NSS from " + jettyXml);
+ log.warn("Starting NSS");
// Start the server.
jettyServer.start();
@@ -4658,9 +4658,8 @@
if (tmp == null)
throw new IllegalStateException("Server is not running");
- final int port = tmp.getConnectors()[0].getLocalPort();
- haLog.warn("Returning NSSPort: " + port);
- return port;
+ return tmp.getConnectors()[0].getLocalPort();
+
}
/**
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -63,6 +63,13 @@
super(name);
}
+ @Override
+ protected int replicationFactor() {
+
+ return 3;
+
+ }
+
/**
* Issue HTTP request to a service to take a snapshot.
*
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -87,7 +87,6 @@
import com.bigdata.jini.util.JiniUtil;
import com.bigdata.journal.IRootBlockView;
import com.bigdata.journal.StoreState;
-import com.bigdata.journal.jini.ha.HAJournalServer.ConfigurationOptions;
import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest;
import com.bigdata.quorum.AbstractQuorumClient;
import com.bigdata.quorum.AsynchronousQuorumCloseException;
@@ -110,7 +109,7 @@
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
*/
-public class AbstractHA3JournalServerTestCase extends
+public abstract class AbstractHA3JournalServerTestCase extends
AbstractHAJournalServerTestCase implements DiscoveryListener {
/** Quorum client used to monitor (or act on) the logical service quorum. */
@@ -133,7 +132,7 @@
* Implementation listens for the death of the child process and can be used
* to decide when the child process is no longer executing.
*/
- public static class ServiceListener implements IServiceListener {
+ static class ServiceListener implements IServiceListener {
private volatile HAGlue haGlue;
private volatile ProcessHelper processHelper;
@@ -152,13 +151,14 @@
this.haGlue = haGlue;
}
- @SuppressWarnings("unused")
- public HAGlue getHAGlue() {
+// @SuppressWarnings("unused")
+// public HAGlue getHAGlue() {
+//
+// return haGlue;
+//
+// }
- return haGlue;
-
- }
-
+ @Override
public void add(final ProcessHelper processHelper) {
if (processHelper == null)
@@ -1378,11 +1378,33 @@
}
- protected String getZKConfigFile() {
- return "zkClient.config";
+ /**
+ * Return the zookeeper client configuration file.
+ */
+ final protected String getZKConfigFile() {
+
+ return "zkClient.config";
+
}
/**
+ * The as-configured replication factor.
+ * <p>
+ * Note: This is defined in the HAJournal.config file, which is where the
+ * {@link HAJournalServer} gets the correct value. We also need to have the
+ * replicationFactor on hand for the test suite so we can setup the quorum
+ * in the test fixture correctly. However, it is difficult to reach the
+     * appropriate HAJournal.config file from the test fixture during
+ * {@link #setUp()}. Therefore, for the test setup, this is achieved by
+     * overriding this method in the test class.
+ */
+ protected int replicationFactor() {
+
+ return 3;
+
+ }
+
+ /**
* Return Zookeeper quorum that can be used to reflect (or act on) the
* distributed quorum state for the logical service.
*
@@ -1407,7 +1429,7 @@
// Note: Save reference.
this.zookeeper = new ZooKeeper(zoohosts, sessionTimeout, new Watcher() {
@Override
- public void process(WatchedEvent event) {
+ public void process(final WatchedEvent event) {
if (log.isInfoEnabled())
log.info(event);
}
@@ -1457,9 +1479,19 @@
logicalServiceZPath = logicalServiceZPathPrefix + "/"
+ logicalServiceId;
- final int replicationFactor = (Integer) config.getEntry(
- ZookeeperClientConfig.Options.NAMESPACE,
- ConfigurationOptions.REPLICATION_FACTOR, Integer.TYPE);
+ /**
+ * Note: This is defined in the HAJournal.config file, which is where
+ * the HAJournalServer gets the correct value.
+ *
+ * However, we also need to have the replicationFactor on hand for the
+ * test suite so we can setup the quorum in the test fixture correctly.
+ */
+ final int replicationFactor = replicationFactor();
+// {
+// replicationFactor = (Integer) config.getEntry(
+// ConfigurationOptions.COMPONENT,
+// ConfigurationOptions.REPLICATION_FACTOR, Integer.TYPE);
+// }
// if (!zka.awaitZookeeperConnected(10, TimeUnit.SECONDS)) {
//
@@ -1565,7 +1597,7 @@
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
*/
- public abstract class StartServerTask implements Callable<HAGlue> {
+ abstract class StartServerTask implements Callable<HAGlue> {
private final String name;
private final String configName;
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -1,3 +1,26 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
package com.bigdata.journal.jini.ha;
import java.io.File;
@@ -16,17 +39,13 @@
import com.bigdata.ha.HAGlue;
import com.bigdata.jini.start.IServiceListener;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownATask;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownBTask;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownCTask;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownTask;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ServiceListener;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartATask;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartBTask;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartCTask;
-import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartServerTask;
import com.bigdata.quorum.AsynchronousQuorumCloseException;
+/**
+ * Test suite for HA5.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
public class AbstractHA5JournalServerTestCase extends
AbstractHA3JournalServerTestCase {
@@ -53,10 +72,6 @@
protected final int D_JETTY_PORT = C_JETTY_PORT + 1;
protected final int E_JETTY_PORT = D_JETTY_PORT + 1;
- protected String getZKConfigFile() {
- return "zkClient5.config"; // 5 stage pipeline
- }
-
/**
* These {@link IServiceListener}s are used to reliably detect that the
* corresponding process starts and (most importantly) that it is really
@@ -88,6 +103,13 @@
return new File(getServiceDirE(), "HALog");
}
+ @Override
+ protected int replicationFactor() {
+
+ return 5;
+
+ }
+
/**
* Start A then B then C. As each service starts, this method waits for that
* service to appear in the pipeline in the proper position.
@@ -141,7 +163,7 @@
}
/**
- * Start of 3 HA services (this happens in the ctor).
+ * Start of 5 HA services (this happens in the ctor).
*
* @param sequential
* True if the startup should be sequential or false if
@@ -362,6 +384,7 @@
super(name);
}
+ @Override
protected void destroyAll() throws AsynchronousQuorumCloseException,
InterruptedException, TimeoutException {
/**
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -1259,35 +1259,34 @@
}
- /**
- * The effective name for this test as used to name the directories in which
- * we store things.
- *
- * TODO If there are method name collisions across the different test
- * classes then the test suite name can be added to this. Also, if there are
- * file naming problems, then this value can be munged before it is
- * returned.
- */
- private final String effectiveTestFileName = getClass().getSimpleName()
- + "." + getName();
+// /**
+// * The effective name for this test as used to name the directories in which
+// * we store things.
+// *
+// * TODO If there are method name collisions across the different test
+// * classes then the test suite name can be added to this. Also, if there are
+// * file naming problems, then this value can be munged before it is
+// * returned.
+// */
+// private final String effectiveTestFileName = getClass().getSimpleName()
+// + "." + getName();
+//
+// /**
+// * The directory that is the parent of each {@link HAJournalServer}'s
+// * individual service directory.
+// */
+// protected File getTestDir() {
+// return new File(TGT_PATH, getEffectiveTestFileName());
+// }
+//
+// /**
+// * The effective name for this test as used to name the directories in which
+// * we store things.
+// */
+// protected String getEffectiveTestFileName() {
+//
+// return effectiveTestFileName;
+//
+// }
- /**
- * The directory that is the parent of each {@link HAJournalServer}'s
- * individual service directory.
- */
- protected File getTestDir() {
- return new File(TGT_PATH, getEffectiveTestFileName());
- }
-
- /**
- * The effective name for this test as used to name the directories in which
- * we store things.
- */
- protected String getEffectiveTestFileName() {
-
- return effectiveTestFileName;
-
- }
-
-
}
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config 2014-04-15 14:16:12 UTC (rev 8122)
@@ -67,7 +67,7 @@
private static haPort = ConfigMath.add(9090,3);
// The #of services in the write pipeline.
- private static replicationFactor = 5;
+ private static replicationFactor = 5; // Note: overridden in the HA5 test suites.
// The logical service identifier shared by all members of the quorum.
private static logicalServiceId = System.getProperty("test.logicalServiceId","CI-HAJournal-1");
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config 2014-04-15 14:16:12 UTC (rev 8122)
@@ -67,7 +67,7 @@
private static haPort = ConfigMath.add(9090,4);
// The #of services in the write pipeline.
- private static replicationFactor = 5;
+ private static replicationFactor = 5; // Note: overridden in the HA5 test suites.
// The logical service identifier shared by all members of the quorum.
private static logicalServiceId = System.getProperty("test.logicalServiceId","CI-HAJournal-1");
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -75,6 +75,13 @@
super(name);
}
+ @Override
+ protected int replicationFactor() {
+
+ return 3;
+
+ }
+
/**
* Complex hack to override the {@link HAJournal} properties.
*
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -74,6 +74,11 @@
// Basic tests for a single HAJournalServer (quorum does not meet)
suite.addTestSuite(TestHAJournalServer.class);
+ // HA1 test suite.
+ suite.addTestSuite(TestHA1JournalServer.class);
+ suite.addTestSuite(TestHA1SnapshotPolicy.class);
+ suite.addTestSuite(TestHA1SnapshotPolicy2.class);
+
// HA2 test suite (k=3, but only 2 services are running).
suite.addTestSuite(TestHA2JournalServer.class);
@@ -108,17 +113,16 @@
// Verify ability to override the HAJournal implementation class.
suite.addTestSuite(TestHAJournalServerOverride.class);
- // Test suite of longer running stress tests for an HA3 cluster.
- suite.addTestSuite(StressTestHA3JournalServer.class);
-
- // Test suite of longer running stress tests for an HA5 cluster.
+ // HA5 test suite.
suite.addTestSuite(TestHA5JournalServer.class);
suite.addTestSuite(TestHA5JournalServerWithHALogs.class);
- // Test suite of longer running stress tests for an HA1 cluster.
- suite.addTestSuite(TestHA1JournalServer.class);
- suite.addTestSuite(TestHA1SnapshotPolicy.class);
- suite.addTestSuite(TestHA1SnapshotPolicy2.class);
+ /*
+ * Stress tests.
+ */
+
+ // Test suite of longer running stress tests for an HA3 cluster.
+ suite.addTestSuite(StressTestHA3JournalServer.class);
return suite;
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -1,3 +1,26 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
package com.bigdata.journal.jini.ha;
import java.util.concurrent.TimeUnit;
@@ -3,10 +26,13 @@
import java.util.concurrent.TimeoutException;
-import com.bigdata.ha.HAGlue;
-import com.bigdata.ha.HAStatusEnum;
-
import net.jini.config.Configuration;
+import com.bigdata.ha.HAGlue;
+/**
+ * Test suite for HA1.
...
[truncated message content] |