From: <tho...@us...> - 2013-03-21 18:50:18
Revision: 7029 http://bigdata.svn.sourceforge.net/bigdata/?rev=7029&view=rev Author: thompsonbry Date: 2013-03-21 18:50:05 +0000 (Thu, 21 Mar 2013) Log Message: ----------- HARestore - added option to transparently decompress a snapshot (the original file is not modified). Tested HARestore on snapshot with 18 HALogs. Rolled forward and was able to use DumpJournal to validate the resulting file. Removed unused pipelineUUIDs field from HAJournal configuration files. Further cleanup on HA3 tests. Modified Paths: -------------- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy.java branches/READ_CACHE/src/resources/HAJournal/HAJournal.config Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2013-03-21 17:03:29 UTC (rev 7028) +++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2013-03-21 18:50:05 UTC (rev 7029) @@ -330,7 +330,7 @@ /** * Apply HALog file(s) to the journal. Each HALog file represents a single * native transaction on the database and will advance the journal by one - * commit point. The journal will go through local commit protocol as each + * commit point. The journal will go through a local commit protocol as each * HALog is applied. HALogs will be applied starting with the first commit * point GT the current commit point on the journal. You may optionally * specify a stopping criteria, e.g., the last commit point that you wish to @@ -357,6 +357,7 @@ * </dl> * * @return <code>0</code> iff the operation was fully successful. + * @throws IOException * * @throws Exception * if the {@link UUID}s or other critical metadata of the @@ -365,7 +366,7 @@ * if an error occcur when reading an HALog or writing on the * journal. */ - public static void main(final String[] args) { + public static void main(final String[] args) throws IOException { if (args.length == 0) { @@ -410,7 +411,7 @@ } - if (i != args.length - 1) { + if (i != args.length - 2) { usage(args); @@ -419,11 +420,40 @@ } // Journal file. - final File journalFile = new File(args[i++]); + File journalFile = new File(args[i++]); // HALogDir. final File haLogDir = new File(args[i++]); + /* + * Decompress the snapshot onto a temporary file in the current working + * directory. + */ + + if (journalFile.getName().endsWith(SnapshotManager.SNAPSHOT_EXT)) { + + // source is the snapshot. + final File in = journalFile; + + final String basename = journalFile.getName().substring( + 0, + journalFile.getName().length() + - SnapshotManager.SNAPSHOT_EXT.length()); + + // temporary file in the same directory as the snapshot. 
+ final File out = File.createTempFile(basename + "-", + Journal.Options.JNL, journalFile.getAbsoluteFile() + .getParentFile()); + + System.out.println("Decompressing " + in + " to " + out); + + // Decompress the snapshot. + SnapshotManager.decompress(in, out); + + journalFile = out; + + } + // Validate journal file. { Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2013-03-21 17:03:29 UTC (rev 7028) +++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2013-03-21 18:50:05 UTC (rev 7029) @@ -32,6 +32,7 @@ import java.io.FilenameFilter; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.nio.ByteBuffer; import java.security.DigestException; import java.security.MessageDigest; @@ -1037,7 +1038,10 @@ * {@link IHABufferStrategy#computeDigest(Object, MessageDigest)} * * @param commitCounter + * The commit counter that identifies the snapshot. * @param digest + * The digest. + * * @throws IOException * @throws FileNotFoundException * @throws DigestException @@ -1050,6 +1054,33 @@ final File file = getSnapshotFile(commitCounter); + getSnapshotDigest(file, digest); + + } + + /** + * Compute the digest of a snapshot file. + * <p> + * Note: The digest is only computed for the data beyond the file header. + * This is for consistency with + * {@link IHABufferStrategy#computeDigest(Object, MessageDigest)} + * + * @param commitCounter + * The commit counter that identifies the snapshot. + * @param digest + * The digest. + * + * @throws IOException + * @throws FileNotFoundException + * @throws DigestException + * + * TODO We should pin the snapshot if we are reading it to + * compute its digest. + */ + static public void getSnapshotDigest(final File file, + final MessageDigest digest) throws FileNotFoundException, + IOException, DigestException { + // Note: Throws FileNotFoundException. final GZIPInputStream is = new GZIPInputStream( new FileInputStream(file)); @@ -1097,4 +1128,87 @@ } + /** + * Copy the input stream to the output stream. + * + * @param content + * The input stream. + * @param outstr + * The output stream. + * + * @throws IOException + */ + static private void copyStream(final InputStream content, + final OutputStream outstr) throws IOException { + + final byte[] buf = new byte[1024]; + + while (true) { + + final int rdlen = content.read(buf); + + if (rdlen <= 0) { + + break; + + } + + outstr.write(buf, 0, rdlen); + + } + + } + + /** + * Decompress a snapshot onto the specified file. The original file is not + * modified. + * + * @param src + * The snapshot. + * @param dst + * The file onto which the decompressed snapshot will be written. + * + * @throws IOException + * if the source file does not exist. + * @throws IOException + * if the destination file exists and is not empty. + * @throws IOException + * if there is a problem decompressing the source file onto the + * destination file. 
+ */ + public static void decompress(final File src, final File dst) + throws IOException { + + if (!src.exists()) + throw new FileNotFoundException(src.getAbsolutePath()); + + if (!dst.exists() && dst.length() == 0) + throw new IOException("Output file exists and is not empty: " + + dst.getAbsolutePath()); + + if (log.isInfoEnabled()) + log.info("src=" + src + ", dst=" + dst); + + InputStream is = null; + OutputStream os = null; + try { + is = new GZIPInputStream(new FileInputStream(src)); + os = new FileOutputStream(dst); + copyStream(is, os); + os.flush(); + } finally { + if (is != null) + try { + is.close(); + } catch (IOException ex) { + } + if (os != null) + try { + os.close(); + } catch (IOException ex) { + } + } + + } + } Modified: branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-03-21 17:03:29 UTC (rev 7028) +++ branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-03-21 18:50:05 UTC (rev 7029) @@ -2073,4 +2073,118 @@ } + /** + * Task loads a large data set. + */ + protected class LargeLoadTask implements Callable<Void> { + + private final long token; + private final boolean reallyLargeLoad; + + /** + * Large load. + * + * @param token + * The token that must remain valid during the operation. + */ + public LargeLoadTask(final long token) { + + this(token, false/*reallyLargeLoad*/); + + } + + /** + * Either large or really large load. + * + * @param token + * The token that must remain valid during the operation. + * @param reallyLargeLoad + * if we will also load the 3 degrees of freedom file. + */ + public LargeLoadTask(final long token, final boolean reallyLargeLoad) { + + this.token = token; + + this.reallyLargeLoad = reallyLargeLoad; + + } + + public Void call() throws Exception { + + final StringBuilder sb = new StringBuilder(); + sb.append("DROP ALL;\n"); + sb.append("LOAD <" + getFoafFileUrl("data-0.nq.gz") + ">;\n"); + sb.append("LOAD <" + getFoafFileUrl("data-1.nq.gz") + ">;\n"); + sb.append("LOAD <" + getFoafFileUrl("data-2.nq.gz") + ">;\n"); + if (reallyLargeLoad) + sb.append("LOAD <" + getFoafFileUrl("data-3.nq.gz") + ">;\n"); + sb.append("INSERT {?x rdfs:label ?y . } WHERE {?x foaf:name ?y };\n"); + sb.append("PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"); + sb.append("INSERT DATA\n"); + sb.append("{\n"); + sb.append(" <http://example/book1> dc:title \"A new book\" ;\n"); + sb.append(" dc:creator \"A.N.Other\" .\n"); + sb.append("}\n"); + + final String updateStr = sb.toString(); + + final HAGlue leader = quorum.getClient().getLeader(token); + + // Verify quorum is still valid. + quorum.assertQuorum(token); + + getRemoteRepository(leader).prepareUpdate(updateStr).evaluate(); + + // Verify quorum is still valid. + quorum.assertQuorum(token); + + // Done. + return null; + + } + + } + + /** + * Spin, looking for the quorum to fully meet *before* the LOAD is finished. + * + * @return <code>true</code> iff the LOAD finished before the {@link Future} + * was done. 
+ */ + protected boolean awaitFullyMetDuringLOAD(final long token, + final Future<Void> ft) throws InterruptedException, + ExecutionException, TimeoutException { + + final long begin = System.currentTimeMillis(); + boolean fullyMetBeforeLoadDone = false; + while (!fullyMetBeforeLoadDone) { + final long elapsed = System.currentTimeMillis() - begin; + if (elapsed > loadLoadTimeoutMillis) { + /** + * This timeout is a fail safe for LOAD operations that get HUNG + * on the server and prevents CI hangs. + */ + throw new TimeoutException( + "LOAD did not complete in a timely fashion."); + } + try { + if (quorum.isQuorumFullyMet(token) && !ft.isDone()) { + // The quorum is fully met before the load is done. + fullyMetBeforeLoadDone = true; + } + // Check LOAD for error. + ft.get(50/* timeout */, TimeUnit.MILLISECONDS); + // LOAD is done (no errors, future is done). + assertTrue(fullyMetBeforeLoadDone); + break; + } catch (TimeoutException ex) { + // LOAD still running. + continue; + } + } + + return fullyMetBeforeLoadDone; + + } + } Modified: branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2013-03-21 17:03:29 UTC (rev 7028) +++ branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2013-03-21 18:50:05 UTC (rev 7029) @@ -263,8 +263,6 @@ ); */ - pipelineUUIDs = bigdata.pipeline; - replicationFactor = bigdata.replicationFactor; } Modified: branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2013-03-21 17:03:29 UTC (rev 7028) +++ branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2013-03-21 18:50:05 UTC (rev 7029) @@ -262,8 +262,6 @@ ); */ - pipelineUUIDs = bigdata.pipeline; - replicationFactor = bigdata.replicationFactor; } Modified: branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2013-03-21 17:03:29 UTC (rev 7028) +++ branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2013-03-21 18:50:05 UTC (rev 7029) @@ -262,8 +262,6 @@ ); */ - pipelineUUIDs = bigdata.pipeline; - replicationFactor = bigdata.replicationFactor; } Modified: branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-03-21 17:03:29 UTC (rev 7028) +++ branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-03-21 18:50:05 UTC (rev 7029) @@ -32,8 +32,6 @@ import java.io.IOException; import java.util.UUID; import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -42,6 +40,7 @@ import com.bigdata.ha.halog.HALogWriter; import com.bigdata.ha.msg.HARootBlockRequest; import com.bigdata.quorum.Quorum; 
+import com.bigdata.rdf.sail.webapp.client.HAStatusEnum; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; /** @@ -143,25 +142,6 @@ * files should be purged at that commit point. */ simpleTransaction(); -// { -// -// final StringBuilder sb = new StringBuilder(); -// sb.append("DROP ALL;\n"); -// sb.append("PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"); -// sb.append("INSERT DATA {\n"); -// sb.append(" <http://example/book1> dc:title \"A new book\" ;\n"); -// sb.append(" dc:creator \"A.N.Other\" .\n"); -// sb.append("}\n"); -// -// final String updateStr = sb.toString(); -// -// final HAGlue leader = quorum.getClient().getLeader(token); -// -// // Verify quorum is still valid. -// quorum.assertQuorum(token); -// -// getRemoteRepository(leader).prepareUpdate(updateStr).evaluate(); -// } // Current commit point. final long lastCommitCounter2 = serverA @@ -430,25 +410,6 @@ * files should be retained at that commit point. */ simpleTransaction(); -// { -// final StringBuilder sb = new StringBuilder(); -// sb.append("DROP ALL;\n"); -// sb.append("PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"); -// sb.append("INSERT DATA {\n"); -// sb.append(" <http://example/book1> dc:title \"A new book\" ;\n"); -// sb.append(" dc:creator \"A.N.Other\" .\n"); -// sb.append("}\n"); -// -// final String updateStr = sb.toString(); -// -// final HAGlue leader = quorum.getClient().getLeader(token); -// -// // Verify quorum is still valid. -// quorum.assertQuorum(token); -// -// getRemoteRepository(leader).prepareUpdate(updateStr).evaluate(); -// -// } // Current commit point. final long lastCommitCounter2 = serverA @@ -781,46 +742,6 @@ // Start LOAD. executorService.submit(ft); -// // start concurrent task loads that continue until fully met -// final AtomicBoolean spin = new AtomicBoolean(false); -// final Thread loadThread = new Thread() { -// public void run() { -// final StringBuilder sb = new StringBuilder(); -// sb.append("DROP ALL;\n"); -// sb.append("LOAD <" + getFoafFileUrl("data-0.nq.gz") + ">;\n"); -// sb.append("LOAD <" + getFoafFileUrl("data-1.nq.gz") + ">;\n"); -// sb.append("LOAD <" + getFoafFileUrl("data-2.nq.gz") + ">;\n"); -// sb.append("LOAD <" + getFoafFileUrl("data-3.nq.gz") + ">;\n"); -// sb.append("INSERT {?x rdfs:label ?y . } WHERE {?x foaf:name ?y };\n"); -// sb.append("PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"); -// sb.append("INSERT DATA\n"); -// sb.append("{\n"); -// sb.append(" <http://example/book1> dc:title \"A new book\" ;\n"); -// sb.append(" dc:creator \"A.N.Other\" .\n"); -// sb.append("}\n"); -// -// final String updateStr = sb.toString(); -// -// final HAGlue leader = quorum.getClient().getLeader(token); -// -// // Verify quorum is still valid. -// quorum.assertQuorum(token); -// -// try { -// getRemoteRepository(leader).prepareUpdate(updateStr) -// .evaluate(); -// log.info("Updated"); -// } catch (Exception e) { -// e.printStackTrace(); -// -// fail("Probably unexpected on run ", e); -// } finally { -// spin.set(true); -// } -// } -// }; -// loadThread.start(); - // allow load head start Thread.sleep(300/*ms*/); @@ -835,13 +756,6 @@ // Await LOAD, but with a timeout. 
ft.get(loadLoadTimeoutMillis, TimeUnit.MILLISECONDS); -// // Need to check if load is active, if not then test has not confirmed active load -// assertFalse(spin.get()); -// -// while (!spin.get()) { -// Thread.sleep(50/*ms*/); -// } - log.info("Should be safe to test digests now"); // Cannot predict last commit counter or whether even logs will remain @@ -1015,44 +929,71 @@ * @throws Exception */ public void testStartABC_RebuildWithPipelineReorganisation() throws Exception { - final ABC startup = new ABC(true/*sequential*/); - - awaitFullyMetQuorum(); + + new ABC(true/* sequential */); + + awaitFullyMetQuorum(); // Now run several transactions for (int i = 0; i < 5; i++) - simpleTransaction(); - + simpleTransaction(); + // shutdown AB and destroy C destroyC(); shutdownA(); shutdownB(); + + /* + * Now restart A, B & C. + * + * Note: We start C first so it will be in the way when A or B attempts + * to become the leader, thus forcing a pipeline reorganization. + */ + final HAGlue serverC = startC(); + awaitPipeline(new HAGlue[] { serverC }); + + // Now start A. + final HAGlue serverA = startA(); + awaitPipeline(new HAGlue[] { serverC, serverA }); - // Now restart A, B & C - final HAGlue serverC = startC(); - awaitPipeline(new HAGlue[] {serverC}); - - final HAGlue serverA = startA(); - awaitPipeline(new HAGlue[] {serverC, serverA}); - final HAGlue serverB = startB(); - - // A & B should meet - awaitMetQuorum(); - - awaitPipeline(new HAGlue[] {serverA, serverB, serverC}); - - // Check HALogs equal - assertHALogDigestsEquals(7L/* firstCommitCounter */, - 7L, new HAGlue[] { serverA, serverB }); - log.warn("CHECK AB LOGS ON MET QUORUM"); - - // C will have go through Rebuild before joining - awaitFullyMetQuorum(); + // And finally start B. + final HAGlue serverB = startB(); + + // A & B should meet + final long token2 = awaitMetQuorum(); + + // The expected pipeline. C was moved to the end. + awaitPipeline(new HAGlue[] { serverA, serverB, serverC }); + + // Wait until A is fully ready. + assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); + // Verify A is the leader. + assertEquals(serverA, quorum.getClient().getLeader(token2)); + + // Check journals for equality on A, B. + assertDigestsEquals(new HAGlue[] { serverA, serverB }); + + // Check HALogs equal on A, B. + assertHALogDigestsEquals(7L/* firstCommitCounter */, 7L, new HAGlue[] { + serverA, serverB }); + + // C will have go through Rebuild before joining + assertEquals(token2, awaitFullyMetQuorum()); + +// Note: I have seen this timeout. This warrants exploring. BBT. +// // Wait until C is fully ready. +// assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverC)); + // Verify binary equality of ALL journals. assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + + // Check HALogs equal on ALL services. + assertHALogDigestsEquals(7L/* firstCommitCounter */, 7L, new HAGlue[] { + serverA, serverB, serverC }); + } - + /** * Test Rebuild of C service where quorum was previously * fully met and where a new quorum is met before C joins for rebuild. @@ -1771,120 +1712,6 @@ } /** - * Task loads a large data set. - */ - private class LargeLoadTask implements Callable<Void> { - - private final long token; - private final boolean reallyLargeLoad; - - /** - * Large load. - * - * @param token - * The token that must remain valid during the operation. - */ - public LargeLoadTask(final long token) { - - this(token, false/*reallyLargeLoad*/); - - } - - /** - * Either large or really large load. 
- * - * @param token - * The token that must remain valid during the operation. - * @param reallyLargeLoad - * if we will also load the 3 degrees of freedom file. - */ - public LargeLoadTask(final long token, final boolean reallyLargeLoad) { - - this.token = token; - - this.reallyLargeLoad = reallyLargeLoad; - - } - - public Void call() throws Exception { - - final StringBuilder sb = new StringBuilder(); - sb.append("DROP ALL;\n"); - sb.append("LOAD <" + getFoafFileUrl("data-0.nq.gz") + ">;\n"); - sb.append("LOAD <" + getFoafFileUrl("data-1.nq.gz") + ">;\n"); - sb.append("LOAD <" + getFoafFileUrl("data-2.nq.gz") + ">;\n"); - if (reallyLargeLoad) - sb.append("LOAD <" + getFoafFileUrl("data-3.nq.gz") + ">;\n"); - sb.append("INSERT {?x rdfs:label ?y . } WHERE {?x foaf:name ?y };\n"); - sb.append("PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"); - sb.append("INSERT DATA\n"); - sb.append("{\n"); - sb.append(" <http://example/book1> dc:title \"A new book\" ;\n"); - sb.append(" dc:creator \"A.N.Other\" .\n"); - sb.append("}\n"); - - final String updateStr = sb.toString(); - - final HAGlue leader = quorum.getClient().getLeader(token); - - // Verify quorum is still valid. - quorum.assertQuorum(token); - - getRemoteRepository(leader).prepareUpdate(updateStr).evaluate(); - - // Verify quorum is still valid. - quorum.assertQuorum(token); - - // Done. - return null; - - } - - } - - /** - * Spin, looking for the quorum to fully meet *before* the LOAD is finished. - * - * @return <code>true</code> iff the LOAD finished before the {@link Future} - * was done. - */ - private boolean awaitFullyMetDuringLOAD(final long token, - final Future<Void> ft) throws InterruptedException, - ExecutionException, TimeoutException { - - final long begin = System.currentTimeMillis(); - boolean fullyMetBeforeLoadDone = false; - while (!fullyMetBeforeLoadDone) { - final long elapsed = System.currentTimeMillis() - begin; - if (elapsed > loadLoadTimeoutMillis) { - /** - * This timeout is a fail safe for LOAD operations that get HUNG - * on the server and prevents CI hangs. - */ - throw new TimeoutException( - "LOAD did not complete in a timely fashion."); - } - try { - if (quorum.isQuorumFullyMet(token) && !ft.isDone()) { - // The quorum is fully met before the load is done. - fullyMetBeforeLoadDone = true; - } - // Check LOAD for error. - ft.get(50/* timeout */, TimeUnit.MILLISECONDS); - // LOAD is done (no errors, future is done). - assertTrue(fullyMetBeforeLoadDone); - break; - } catch (TimeoutException ex) { - // LOAD still running. - continue; - } - } - - return fullyMetBeforeLoadDone; - - } - - /** * Start A+B+C in strict sequence. Wait until the quorum fully meets. Start * a long running LOAD. While the LOAD is running, fail C (the last * follower). Verify that the LOAD completes successfully with the remaining Modified: branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy.java 2013-03-21 17:03:29 UTC (rev 7028) +++ branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy.java 2013-03-21 18:50:05 UTC (rev 7029) @@ -479,30 +479,8 @@ /* * LOAD data on leader. 
*/ - { + new LargeLoadTask(token, true/* reallyLargeLoad */).call(); - final StringBuilder sb = new StringBuilder(); - sb.append("DROP ALL;\n"); - sb.append("LOAD <" + getFoafFileUrl("data-0.nq.gz") + ">;\n"); - sb.append("LOAD <" + getFoafFileUrl("data-1.nq.gz") + ">;\n"); - sb.append("LOAD <" + getFoafFileUrl("data-2.nq.gz") + ">;\n"); - sb.append("LOAD <" + getFoafFileUrl("data-3.nq.gz") + ">;\n"); - sb.append("INSERT {?x rdfs:label ?y . } WHERE {?x foaf:name ?y };\n"); - sb.append("PREFIX dc: <http://purl.org/dc/elements/1.1/>\n"); - sb.append("INSERT DATA {\n"); - sb.append(" <http://example/book1> dc:title \"A new book\" ;\n"); - sb.append(" dc:creator \"A.N.Other\" .\n"); - sb.append("}\n"); - - final String updateStr = sb.toString(); - - // Verify quorum is still valid. - quorum.assertQuorum(token); - - repos[0].prepareUpdate(updateStr).evaluate(); - - } - /* * Verify that query on all nodes is allowed and now provides a * non-empty result. Modified: branches/READ_CACHE/src/resources/HAJournal/HAJournal.config =================================================================== --- branches/READ_CACHE/src/resources/HAJournal/HAJournal.config 2013-03-21 17:03:29 UTC (rev 7028) +++ branches/READ_CACHE/src/resources/HAJournal/HAJournal.config 2013-03-21 18:50:05 UTC (rev 7029) @@ -51,16 +51,7 @@ /* * This is a sample configuration file for a highly available Journal. A * version of this file must be available to each HAJournalServer in the - * pipeline. The pipeline depends on the stable assignment of ServiceID - * to HAJournalServers. A unique ServiceID must be explicitly assigned to - * each HAJournalServer in its configuration entry. The ordered list of - * those ServiceIDs is shared by all services and defines the write - * replication pipeline. The first entry in the write replication pipeline - * is the leader (aka master). You can use UUID.randomUUID() or GenUUID - * to create UUIDs. - * - * Note: The ServiceUUID Entry MUST be different for each file. It assigns - * a ServiceID to the service! + * pipeline. */ /* @@ -321,8 +312,6 @@ bigdata.haPort ); - pipelineUUIDs = bigdata.pipeline; - replicationFactor = bigdata.replicationFactor; haLogDir = bigdata.haLogDir;
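
For context, a minimal sketch (not part of the commit) of how the new transparent-decompression path could be driven programmatically. The snapshot and HALog paths are hypothetical; everything else (SnapshotManager.SNAPSHOT_EXT, SnapshotManager.decompress(), Journal.Options.JNL, and the two positional arguments that HARestore.main() expects after its options) is taken from the diff above.

package com.bigdata.journal.jini.ha;

import java.io.File;

import com.bigdata.journal.Journal;

/**
 * Sketch only: restore a journal from a compressed snapshot plus HALogs.
 */
public class HARestoreFromSnapshotExample {

    public static void main(final String[] args) throws Exception {

        // Hypothetical locations - substitute real paths for your deployment.
        final File snapshot = new File("/data/ha/snapshot/000000000000000018"
                + SnapshotManager.SNAPSHOT_EXT);
        final File haLogDir = new File("/data/ha/HALog");

        // As of r7029, HARestore accepts the compressed snapshot directly: it
        // is decompressed onto a temporary .jnl file beside the snapshot (the
        // snapshot itself is never modified) and then rolled forward by
        // applying the HALog files found in haLogDir.
        HARestore.main(new String[] { snapshot.getPath(), haLogDir.getPath() });

        // The decompression step is also available on its own; the destination
        // should be an existing, empty file, e.g. one from createTempFile().
        final File journal = File.createTempFile("restored-",
                Journal.Options.JNL, snapshot.getAbsoluteFile().getParentFile());
        SnapshotManager.decompress(snapshot, journal);

    }

}

The resulting journal can then be checked with DumpJournal, as described in the log message.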
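Similarly, a rough sketch of how the LargeLoadTask and awaitFullyMetDuringLOAD helpers, now hoisted into AbstractHA3JournalServerTestCase, are meant to be combined from a concrete HA3 test. The test class, its startup sequence, and the assumption that executorService and loadLoadTimeoutMillis are accessible to subclasses are illustrative; the helper methods and fields themselves are the ones visible in the diff.

package com.bigdata.journal.jini.ha;

import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;

/**
 * Sketch only: a hypothetical HA3 test using the hoisted helpers.
 */
public class ExampleHA3LoadTest extends AbstractHA3JournalServerTestCase {

    public void testStartAB_largeLoad_thenStartC() throws Exception {

        // Start two services and wait for the quorum to meet.
        startA();
        startB();
        final long token = awaitMetQuorum();

        // Run the large LOAD against the leader on a separate thread.
        final FutureTask<Void> ft = new FutureTask<Void>(
                new LargeLoadTask(token, true/* reallyLargeLoad */));
        executorService.submit(ft);

        // Give the LOAD a head start, then bring up the third service.
        Thread.sleep(300/* ms */);
        startC();

        // Spin until the quorum fully meets while the LOAD is still running
        // (throws TimeoutException if the LOAD hangs on the server).
        assertTrue(awaitFullyMetDuringLOAD(token, ft));

        // Finally await the LOAD itself, with a timeout as a CI fail-safe.
        ft.get(loadLoadTimeoutMillis, TimeUnit.MILLISECONDS);

    }

}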