From: <tho...@us...> - 2014-01-31 18:37:45
Revision: 7838
http://bigdata.svn.sourceforge.net/bigdata/?rev=7838&view=rev
Author: thompsonbry
Date: 2014-01-31 18:37:34 +0000 (Fri, 31 Jan 2014)
Log Message:
-----------
Modified the HARestore utility to support automatic detection of the most recent snapshot, extraction of the journal from that snapshot, and roll-forward of that journal.
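
As a rough sketch of the new workflow, the utility can now be pointed at the snapshot directory itself; the paths below are hypothetical and only illustrate the arguments documented in the HARestore javadoc further down:

import java.io.IOException;

import com.bigdata.journal.jini.ha.HARestore;

public class RestoreFromLatestSnapshot {

    public static void main(final String[] args) throws IOException {

        // Point HARestore at the snapshot directory rather than a specific
        // journal or snapshot file. The most recent snapshot is located,
        // decompressed into the current working directory, and then rolled
        // forward by applying the HALog files.
        HARestore.main(new String[] {
                "/var/bigdata/snapshot", // hypothetical snapshot directory
                "/var/bigdata/HALog"     // hypothetical HALog directory
        });

    }

}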
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-01-31 17:44:48 UTC (rev 7837)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-01-31 18:37:34 UTC (rev 7838)
@@ -26,10 +26,15 @@
import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
import java.util.Formatter;
import org.apache.log4j.Logger;
+import com.bigdata.ha.halog.IHALogReader;
+import com.bigdata.journal.jini.ha.SnapshotManager;
+
/**
* Utility class for operations on files that are named using a commit counter.
*
@@ -247,4 +252,91 @@
}
+ /**
+ * Find and return the {@link File} associated with the greatest commit
+ * counter. This uses a reverse order search to locate the most recent file
+ * very efficiently.
+ *
+ * @param f
+ * The root of the directory structure for the snapshot or HALog
+ * files.
+ * @param fileFilter
+ * Either the {@link SnapshotManager#SNAPSHOT_FILTER} or the
+ * {@link IHALogReader#HALOG_FILTER}.
+ *
+ * @return The file from the directory structure associated with the
+ * greatest commit counter.
+ *
+ * @throws IOException
+ */
+ public static File findGreatestCommitCounter(final File f,
+ final FileFilter fileFilter) throws IOException {
+
+ if (f == null)
+ throw new IllegalArgumentException();
+
+ if (fileFilter == null)
+ throw new IllegalArgumentException();
+
+ if (f.isDirectory()) {
+
+ final File[] files = f.listFiles(fileFilter);
+
+ /*
+ * Sort into (reverse) lexical order to force visitation in
+ * (reverse) lexical order.
+ *
+ * Note: This should work under any OS. Files will be either
+ * directory names (3 digits) or filenames (21 digits plus the file
+ * extension). Thus the comparison centers numerically on the digits
+ * that encode either part of a commit counter (subdirectory) or an
+ * entire commit counter (HALog file).
+ */
+ Arrays.sort(files, ReverseFileComparator.INSTANCE);
+
+ for (int i = 0; i < files.length; i++) {
+
+ final File tmp = findGreatestCommitCounter(files[i], fileFilter);
+
+ if (tmp != null) {
+
+ // Done.
+ return tmp;
+
+ }
+
+ }
+
+ } else if (fileFilter.accept(f)) {
+
+ // Match
+ return f;
+
+ }
+
+ // No match.
+ return null;
+
+ }
+
+ /**
+ * Impose a reverse sort on files.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan
+ * Thompson</a>
+ */
+ private static class ReverseFileComparator implements Comparator<File> {
+
+ @Override
+ public int compare(final File o1, final File o2) {
+
+ return o2.compareTo(o1);
+
+ }
+
+ /** Impose a reverse sort on files. */
+ private static final Comparator<File> INSTANCE = new ReverseFileComparator();
+
+ }
+
}
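
For reference, a minimal usage sketch of the new findGreatestCommitCounter() helper, assuming a snapshot directory laid out by the SnapshotManager (the directory path is hypothetical):

import java.io.File;
import java.io.IOException;

import com.bigdata.journal.CommitCounterUtility;
import com.bigdata.journal.jini.ha.SnapshotManager;

public class FindLatestSnapshot {

    public static void main(final String[] args) throws IOException {

        final File snapshotDir = new File("/var/bigdata/snapshot"); // hypothetical

        // Reverse-order recursive scan of the commit-counter directory
        // structure. Returns the file with the greatest commit counter, or
        // null if no matching files are found.
        final File latest = CommitCounterUtility.findGreatestCommitCounter(
                snapshotDir, SnapshotManager.SNAPSHOT_FILTER);

        System.out.println("Most recent snapshot: " + latest);

    }

}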
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-01-31 17:44:48 UTC (rev 7837)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-01-31 18:37:34 UTC (rev 7838)
@@ -27,6 +27,8 @@
package com.bigdata.journal;
import java.io.File;
+import java.io.FileFilter;
+import java.io.IOException;
import junit.framework.TestCase2;
@@ -63,4 +65,117 @@
}
+ public void test_findGreatestCommitCounter() throws IOException {
+
+ final String ext = ".tmp";
+
+ final FileFilter fileFilter = new FileFilter() {
+
+ @Override
+ public boolean accept(final File f) {
+ if (f.isDirectory()) {
+
+ return true;
+
+ }
+ return f.getName().endsWith(ext);
+ }
+
+ };
+
+ // temp directory for this test.
+ final File dir = File.createTempFile(getName(), "");
+ try {
+
+ if (!dir.delete())
+ fail("Could not delete: " + dir);
+ if (!dir.mkdirs())
+ fail("Could not create: " + dir);
+
+ final File f1 = CommitCounterUtility.getCommitCounterFile(dir, 1L,
+ ext);
+ final File f10 = CommitCounterUtility.getCommitCounterFile(dir,
+ 10L, ext);
+ final File f100 = CommitCounterUtility.getCommitCounterFile(dir,
+ 100L, ext);
+ final File f1000 = CommitCounterUtility.getCommitCounterFile(dir,
+ 1000L, ext);
+ final File f10000 = CommitCounterUtility.getCommitCounterFile(dir,
+ 10000L, ext);
+
+ // No files. Returns null.
+ assertEquals(null, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Create directory structure.
+ if (!f10.getParentFile().mkdirs())
+ fail("Could not create directory structure: " + f1000);
+
+ // No files. Returns null.
+ assertEquals(null, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ if (!f10.createNewFile())
+ fail("Could not create: " + f10);
+
+ // This is the only file. It should be returned.
+ assertEquals(f10, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Create a file with a commit counter LT that file.
+ if (!f1.createNewFile())
+ fail("Could not create: " + f1);
+
+ // The return value should not change.
+ assertEquals(f10, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Create a file with a larger commit counter.
+ if (!f100.createNewFile())
+ fail("Could not create: " + f100);
+
+ // That file should now be returned.
+ assertEquals(f100, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Create a file with a larger commit counter. The commit counter
+ // will cause another directory to be created.
+ if (!f1000.getParentFile().mkdirs())
+ fail("Could not create directory structure: " + f1000);
+ if (!f1000.createNewFile())
+ fail("Could not create: " + f1000);
+
+ // That file should now be returned.
+ assertEquals(f1000, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Create a new directory structure, but do not add a file. The new
+ // directory structure is ordered GT the existing files. For this
+ // case the algorithm needs to work backwards to see if it can find
+ // a non-empty directory.
+ if (!f10000.getParentFile().mkdirs())
+ fail("Could not create directory structure: " + f10000);
+
+ // The same file should be returned since the new dir is empty.
+ assertEquals(f1000, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Add a file to that directory.
+ if (!f10000.createNewFile())
+ fail("Could not create: " + f10000);
+
+ // That file should be returned.
+ assertEquals(f10000,
+ CommitCounterUtility.findGreatestCommitCounter(dir,
+ fileFilter));
+
+ } finally {
+
+ CommitCounterUtility.recursiveDelete(false/* errorIfDeleteFails */,
+ dir, fileFilter);
+
+ }
+
+ }
+
}
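
The test above relies on getCommitCounterFile() to map commit counters onto the nested directory structure (3-digit directory names and 21-digit zero-padded file names, per the comment in findGreatestCommitCounter()). A small sketch that prints the layout, using a hypothetical root directory:

import java.io.File;

import com.bigdata.journal.CommitCounterUtility;

public class ShowCommitCounterLayout {

    public static void main(final String[] args) {

        final File dir = new File("/tmp/halogs"); // hypothetical root directory

        // Print the file that each commit counter maps to. Nearby commit
        // counters share parent directories; larger counters spill over into
        // new subdirectories.
        for (long commitCounter : new long[] { 1L, 10L, 100L, 1000L, 10000L }) {

            final File f = CommitCounterUtility.getCommitCounterFile(dir,
                    commitCounter, ".tmp");

            System.out.println(commitCounter + " -> " + f);

        }

    }

}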
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-01-31 17:44:48 UTC (rev 7837)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-01-31 18:37:34 UTC (rev 7838)
@@ -40,6 +40,7 @@
import com.bigdata.io.DirectBufferPool;
import com.bigdata.io.IBufferAccess;
import com.bigdata.io.writecache.WriteCache;
+import com.bigdata.journal.CommitCounterUtility;
import com.bigdata.journal.IHABufferStrategy;
import com.bigdata.journal.IRootBlockView;
import com.bigdata.journal.Journal;
@@ -58,9 +59,21 @@
*/
private static final Logger haLog = Logger.getLogger("com.bigdata.haLog");
+ /** The journal to be rolled forward. */
private final Journal journal;
+ /**
+ * The directory containing the HALog files to be applied to that journal.
+ */
private final File haLogDir;
+ /**
+ *
+ * @param journal
+ * The journal to be rolled forward.
+ * @param haLogDir
+ * The directory containing the HALog files to be applied to that
+ * journal.
+ */
public HARestore(final Journal journal, final File haLogDir) {
if (journal == null)
@@ -349,43 +362,54 @@
}
/**
- * Apply HALog file(s) to the journal. Each HALog file represents a single
- * native transaction on the database and will advance the journal by one
- * commit point. The journal will go through a local commit protocol as each
- * HALog is applied. HALogs will be applied starting with the first commit
- * point GT the current commit point on the journal. You may optionally
- * specify a stopping criteria, e.g., the last commit point that you wish to
- * restore. If no stopping criteria is specified, then all HALog files in
- * the specified directory will be applied and the journal will be rolled
- * forward to the most recent transaction. The HALog files are not removed,
- * making this process safe.
+ * Apply HALog file(s) to a journal or snapshot file. If the file specified
+ * is a snapshot, then it is uncompressed into the current working directory
+ * to obtain a journal file and the HALogs are applied to that journal. If
+ * the file specified is a journal, then the HALog files are simply rolled
+ * forward against that journal. If the file is a directory, it is assumed
+ * to be the snapshot directory. In this case, the most recent snapshot file
+ * is located, decompressed to obtain a journal file, and then rolled
+ * forward by applying any more recent HALog files.
+ * <p>
+ * Each HALog file represents a single native transaction on the database
+ * and will advance the journal by one commit point. The journal will go
+ * through a local commit protocol as each HALog is applied. HALogs will be
+ * applied starting with the first commit point GT the current commit point
+ * on the journal. You may optionally specify a stopping criteria, e.g., the
+ * last commit point that you wish to restore. If no stopping criteria is
+ * specified, then all HALog files in the specified directory will be
+ * applied and the journal will be rolled forward to the most recent
+ * transaction. The HALog files are not removed, making this process safe.
*
* @param args
- * <code>[options] journalFile haLogDir</code><br>
+ * <code>[options] journalOrSnapshotFileOrSnapshotDir haLogDir</code>
+ * <br>
* where <code>journalFile</code> is the name of the journal file<br>
* where <code>haLogDir</code> is the name of a directory
* containing zero or more HALog files<br>
* where <code>options</code> are any of:
* <dl>
- * <dt>-l</dt>
- * <dd>List available commit points, but do not apply them. This
- * option provides information about the current commit point on
- * the journal and the commit points available in the HALog
- * files.</dd>
- * <dt>-h commitCounter</dt>
- * <dd>The last commit counter that will be applied (halting
- * point for restore).</dd>
+ * <dt>-l</dt> <dd>List available commit points, but do not apply
+ * them. This option provides information about the current
+ * commit point on the journal and the commit points available in
+ * the HALog files.</dd> <dt>-h commitCounter</dt> <dd>The last
+ * commit counter that will be applied (halting point for
+ * restore).</dd>
* </dl>
*
* @return <code>0</code> iff the operation was fully successful.
- * @throws IOException
*
- * @throws Exception
+ * @throws IOException
+ * if an error occurs when reading an HALog or writing on the
+ * journal.
+ * @throws NoSnapshotException
+ * if you specify a snapshot directory to be searched, but no
+ * snapshot files are found. This can happen if you specify the
+ * wrong directory. It can also happen if you are using the
+ * {@link NoSnapshotPolicy} and never took a snapshot!
+ * @throws RuntimeException
* if the {@link UUID}s or other critical metadata of the
* journal and the HALogs differ.
- * @throws Exception
- * if an error occcur when reading an HALog or writing on the
- * journal.
*/
public static void main(final String[] args) throws IOException {
@@ -446,13 +470,47 @@
// HALogDir.
final File haLogDir = new File(args[i++]);
- /*
- * Decompress the snapshot onto a temporary file in the current working
- * directory.
- */
+ if (journalFile.isDirectory()) {
+ /*
+ * File is a directory.
+ *
+ * Locate the most recent snapshot in that directory structure.
+ */
+
+ File tmp = CommitCounterUtility.findGreatestCommitCounter(
+ journalFile, SnapshotManager.SNAPSHOT_FILTER);
+
+ if (tmp == null) {
+
+ /*
+ * There are no snapshot files.
+ *
+ * Note: This can happen if you specify the wrong directory. It
+ * can also happen if you are using the NoSnapshotPolicy and
+ * never took a snapshot!
+ */
+
+ throw new NoSnapshotException("No snapshot file(s): "
+ + journalFile);
+
+ }
+
+ System.out.println("Most recent snapshot: " + tmp);
+
+ journalFile = tmp;
+
+ }
+
if (journalFile.getName().endsWith(SnapshotManager.SNAPSHOT_EXT)) {
+ /*
+ * File is a snapshot.
+ *
+ * Decompress the snapshot onto a temporary file in the current
+ * working directory.
+ */
+
// source is the snapshot.
final File in = journalFile;
@@ -541,6 +599,12 @@
}
+ private static void usage(final String[] args) {
+
+ System.err.println("usage: (-l|-h commitPoint) <journalFile> haLogDir");
+
+ }
+
/**
* Verify that the HALog root block is consistent with the Journal's root
* block.
@@ -578,10 +642,4 @@
}
- private static void usage(final String[] args) {
-
- System.err.println("usage: (-l|-h commitPoint) <journalFile> haLogDir");
-
- }
-
}
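
The -l and -h options documented in the javadoc above can be used to inspect and bound the roll-forward. A sketch with hypothetical file names and an arbitrary halting commit counter, assuming the halting commit counter is passed as the argument following -h:

import java.io.IOException;

import com.bigdata.journal.jini.ha.HARestore;

public class RestoreToCommitPoint {

    public static void main(final String[] args) throws IOException {

        // List the available commit points without applying them.
        HARestore.main(new String[] { "-l",
                "/var/bigdata/bigdata-ha.jnl", "/var/bigdata/HALog" });

        // Roll the journal forward, halting at commit counter 1000.
        HARestore.main(new String[] { "-h", "1000",
                "/var/bigdata/bigdata-ha.jnl", "/var/bigdata/HALog" });

    }

}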
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java 2014-01-31 18:37:34 UTC (rev 7838)
@@ -0,0 +1,55 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.journal.jini.ha;
+
+import java.io.IOException;
+
+/**
+ * An instance of this exception is thrown if the {@link HARestore} class is
+ * unable to locate a snapshot file. This can happen if you specify the wrong
+ * directory. It can also happen if you are using the {@link NoSnapshotPolicy}
+ * and never took a snapshot!
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class NoSnapshotException extends IOException {
+
+ private static final long serialVersionUID = 1L;
+
+ public NoSnapshotException() {
+ super();
+ }
+
+ public NoSnapshotException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public NoSnapshotException(String message) {
+ super(message);
+ }
+
+ public NoSnapshotException(Throwable cause) {
+ super(cause);
+ }
+}
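
Since NoSnapshotException extends IOException, a restore script can distinguish the "no snapshot found" case from other I/O failures. A sketch with hypothetical directory paths:

import java.io.IOException;

import com.bigdata.journal.jini.ha.HARestore;
import com.bigdata.journal.jini.ha.NoSnapshotException;

public class RestoreWithDiagnostics {

    public static void main(final String[] args) {

        try {

            HARestore.main(new String[] { "/var/bigdata/snapshot",
                    "/var/bigdata/HALog" });

        } catch (NoSnapshotException ex) {

            // Wrong directory, or the NoSnapshotPolicy was in effect and no
            // snapshot was ever taken.
            System.err.println("No snapshot file(s) found: " + ex);

        } catch (IOException ex) {

            System.err.println("Restore failed: " + ex);

        }

    }

}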