|
From: <tho...@us...> - 2014-01-31 18:37:45
|
Revision: 7838
http://bigdata.svn.sourceforge.net/bigdata/?rev=7838&view=rev
Author: thompsonbry
Date: 2014-01-31 18:37:34 +0000 (Fri, 31 Jan 2014)
Log Message:
-----------
Modified the HARestore utility to support the automatic detection of the most recent snapshot, extraction of the journal from that snapshot, and rollforward.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-01-31 17:44:48 UTC (rev 7837)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-01-31 18:37:34 UTC (rev 7838)
@@ -26,10 +26,15 @@
import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
import java.util.Formatter;
import org.apache.log4j.Logger;
+import com.bigdata.ha.halog.IHALogReader;
+import com.bigdata.journal.jini.ha.SnapshotManager;
+
/**
* Utility class for operations on files that are named using a commit counter.
*
@@ -247,4 +252,91 @@
}
+ /**
+ * Find and return the {@link File} associated with the greatest commit
+ * counter. This uses a reverse order search to locate the most recent file
+ * very efficiently.
+ *
+ * @param f
+ * The root of the directory structure for the snapshot or HALog
+ * files.
+ * @param fileFilter
+ * Either the {@link SnapshotManager#SNAPSHOT_FILTER} or the
+ * {@link IHALogReader#HALOG_FILTER}.
+ *
+ * @return The file from the directory structure associated with the
+ * greatest commit counter.
+ *
+ * @throws IOException
+ */
+ public static File findGreatestCommitCounter(final File f,
+ final FileFilter fileFilter) throws IOException {
+
+ if (f == null)
+ throw new IllegalArgumentException();
+
+ if (fileFilter == null)
+ throw new IllegalArgumentException();
+
+ if (f.isDirectory()) {
+
+ final File[] files = f.listFiles(fileFilter);
+
+ /*
+ * Sort into (reverse) lexical order to force visitation in
+ * (reverse) lexical order.
+ *
+ * Note: This should work under any OS. Files will be either
+ * directory names (3 digits) or filenames (21 digits plus the file
+ * extension). Thus the comparison centers numerically on the digits
+ * that encode either part of a commit counter (subdirectory) or an
+ * entire commit counter (HALog file).
+ */
+ Arrays.sort(files,ReverseFileComparator.INSTANCE);
+
+ for (int i = 0; i < files.length; i++) {
+
+ final File tmp = findGreatestCommitCounter(files[i], fileFilter);
+
+ if (tmp != null) {
+
+ // Done.
+ return tmp;
+
+ }
+
+ }
+
+ } else if (fileFilter.accept(f)) {
+
+ // Match
+ return f;
+
+ }
+
+ // No match.
+ return null;
+
+ }
+
+ /**
+ * Impose a reverse sort on files.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan
+ * Thompson</a>
+ */
+ private static class ReverseFileComparator implements Comparator<File> {
+
+ @Override
+ public int compare(final File o1, final File o2) {
+
+ return o2.compareTo(o1);
+
+ }
+
+ /** Impose a reverse sort on files. */
+ private static final Comparator<File> INSTANCE = new ReverseFileComparator();
+
+ }
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-01-31 17:44:48 UTC (rev 7837)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-01-31 18:37:34 UTC (rev 7838)
@@ -27,6 +27,8 @@
package com.bigdata.journal;
import java.io.File;
+import java.io.FileFilter;
+import java.io.IOException;
import junit.framework.TestCase2;
@@ -63,4 +65,117 @@
}
+ public void test_findGreatestCommitCounter() throws IOException {
+
+ final String ext = ".tmp";
+
+ final FileFilter fileFilter = new FileFilter() {
+
+ @Override
+ public boolean accept(final File f) {
+ if (f.isDirectory()) {
+
+ return true;
+
+ }
+ return f.getName().endsWith(ext);
+ }
+
+ };
+
+ // temp directory for this test.
+ final File dir = File.createTempFile(getName(), "");
+ try {
+
+ if (!dir.delete())
+ fail("Could not delete: " + dir);
+ if (!dir.mkdirs())
+ fail("Could not create: " + dir);
+
+ final File f1 = CommitCounterUtility.getCommitCounterFile(dir, 1L,
+ ext);
+ final File f10 = CommitCounterUtility.getCommitCounterFile(dir,
+ 10L, ext);
+ final File f100 = CommitCounterUtility.getCommitCounterFile(dir,
+ 100L, ext);
+ final File f1000 = CommitCounterUtility.getCommitCounterFile(dir,
+ 1000L, ext);
+ final File f10000 = CommitCounterUtility.getCommitCounterFile(dir,
+ 10000L, ext);
+
+ // No files. Returns null.
+ assertEquals(null, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Create directory structure.
+ if (!f10.getParentFile().mkdirs())
+ fail("Could not create directory structure: " + f1000);
+
+ // No files. Returns null.
+ assertEquals(null, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ if (!f10.createNewFile())
+ fail("Could not create: " + f10);
+
+ // This is the only file. It should be returned.
+ assertEquals(f10, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Create a file with a commit counter LT that file.
+ if (!f1.createNewFile())
+ fail("Could not create: " + f1);
+
+ // The return value should not change.
+ assertEquals(f10, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Create a file with a larger commit counter.
+ if (!f100.createNewFile())
+ fail("Could not create: " + f100);
+
+ // That file should now be returned.
+ assertEquals(f100, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Create a file with a larger commit counter. The commit counter
+ // will cause another directory to be created.
+ if (!f1000.getParentFile().mkdirs())
+ fail("Could not create directory structure: " + f1000);
+ if (!f1000.createNewFile())
+ fail("Could not create: " + f1000);
+
+ // That file should now be returned.
+ assertEquals(f1000, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Create a new directory structure, but do not add a file. The new
+ // directory structure is ordered GT the existing files. For this
+ // case the algorithm needs to work backwards to see if it can find
+ // a non-empty directory.
+ if (!f10000.getParentFile().mkdirs())
+ fail("Could not create directory structure: " + f10000);
+
+ // The same file should be returned since the new dir is empty.
+ assertEquals(f1000, CommitCounterUtility.findGreatestCommitCounter(
+ dir, fileFilter));
+
+ // Add a file to that directory.
+ if (!f10000.createNewFile())
+ fail("Could not create: " + f10000);
+
+ // That file should be returned.
+ assertEquals(f10000,
+ CommitCounterUtility.findGreatestCommitCounter(dir,
+ fileFilter));
+
+ } finally {
+
+ CommitCounterUtility.recursiveDelete(false/* errorIfDeleteFails */,
+ dir, fileFilter);
+
+ }
+
+ }
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-01-31 17:44:48 UTC (rev 7837)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-01-31 18:37:34 UTC (rev 7838)
@@ -40,6 +40,7 @@
import com.bigdata.io.DirectBufferPool;
import com.bigdata.io.IBufferAccess;
import com.bigdata.io.writecache.WriteCache;
+import com.bigdata.journal.CommitCounterUtility;
import com.bigdata.journal.IHABufferStrategy;
import com.bigdata.journal.IRootBlockView;
import com.bigdata.journal.Journal;
@@ -58,9 +59,21 @@
*/
private static final Logger haLog = Logger.getLogger("com.bigdata.haLog");
+ /** The journal to be rolled forward. */
private final Journal journal;
+ /**
+ * The directory containing the HALog files to be applied to that journal.
+ */
private final File haLogDir;
+ /**
+ *
+ * @param journal
+ * The journal to be rolled forward.
+ * @param haLogDir
+ * The directory containing the HALog files to be applied to that
+ * journal.
+ */
public HARestore(final Journal journal, final File haLogDir) {
if (journal == null)
@@ -349,43 +362,54 @@
}
/**
- * Apply HALog file(s) to the journal. Each HALog file represents a single
- * native transaction on the database and will advance the journal by one
- * commit point. The journal will go through a local commit protocol as each
- * HALog is applied. HALogs will be applied starting with the first commit
- * point GT the current commit point on the journal. You may optionally
- * specify a stopping criteria, e.g., the last commit point that you wish to
- * restore. If no stopping criteria is specified, then all HALog files in
- * the specified directory will be applied and the journal will be rolled
- * forward to the most recent transaction. The HALog files are not removed,
- * making this process safe.
+ * Apply HALog file(s) to a journal or snapshot file. If the file specified
+ * is a snapshot, then it is uncompressed into the current working directory
+ * to obtain a journal file and the HALogs are applied to that journal. If
+ * the file specified is a journal, then the HALog files are simply rolled
+ * forward against that journal. If the file is a directory, it is assumed
+ * to be the snapshot directory. In this case, the most recent snapshot file
+ * is located, decompressed to obtain a journal file, and then rolled
+ * forward by applying any more recent HALog files.
+ * <p>
+ * Each HALog file represents a single native transaction on the database
+ * and will advance the journal by one commit point. The journal will go
+ * through a local commit protocol as each HALog is applied. HALogs will be
+ * applied starting with the first commit point GT the current commit point
+ * on the journal. You may optionally specify a stopping criteria, e.g., the
+ * last commit point that you wish to restore. If no stopping criteria is
+ * specified, then all HALog files in the specified directory will be
+ * applied and the journal will be rolled forward to the most recent
+ * transaction. The HALog files are not removed, making this process safe.
*
* @param args
- * <code>[options] journalFile haLogDir</code><br>
+ * <code>[options] journalOrSnapshotFileOrSnapshotDir haLogDir</code>
+ * <br>
* where <code>journalFile</code> is the name of the journal file<br>
* where <code>haLogDir</code> is the name of a directory
* containing zero or more HALog files<br>
* where <code>options</code> are any of:
* <dl>
- * <dt>-l</dt>
- * <dd>List available commit points, but do not apply them. This
- * option provides information about the current commit point on
- * the journal and the commit points available in the HALog
- * files.</dd>
- * <dt>-h commitCounter</dt>
- * <dd>The last commit counter that will be applied (halting
- * point for restore).</dd>
+ * <dt>-l</dt> <dd>List available commit points, but do not apply
+ * them. This option provides information about the current
+ * commit point on the journal and the commit points available in
+ * the HALog files.</dd> <dt>-h commitCounter</dt> <dd>The last
+ * commit counter that will be applied (halting point for
+ * restore).</dd>
* </dl>
*
* @return <code>0</code> iff the operation was fully successful.
- * @throws IOException
*
- * @throws Exception
+ * @throws IOException
+ * if an error occurs when reading an HALog or writing on the
+ * journal.
+ * @throws NoSnapshotException
+ * if you specify a snapshot directory to be searched, but no
+ * snapshot files are found. This can happend you specify the
+ * wrong directory. It can also happen if you are using the
+ * {@link NoSnapshotPolicy} and never took a snapshot!
+ * @throws RuntimeException
* if the {@link UUID}s or other critical metadata of the
* journal and the HALogs differ.
- * @throws Exception
- * if an error occcur when reading an HALog or writing on the
- * journal.
*/
public static void main(final String[] args) throws IOException {
@@ -446,13 +470,47 @@
// HALogDir.
final File haLogDir = new File(args[i++]);
- /*
- * Decompress the snapshot onto a temporary file in the current working
- * directory.
- */
+ if(journalFile.isDirectory()) {
+ /*
+ * File is a directory.
+ *
+ * Locate the most recent snapshot in that directory structure.
+ */
+
+ File tmp = CommitCounterUtility.findGreatestCommitCounter(
+ journalFile, SnapshotManager.SNAPSHOT_FILTER);
+
+ if (tmp == null) {
+
+ /*
+ * There are no snapshot files.
+ *
+ * Note: This can happen if you specify the wrong directory. It
+ * can also happen if you are using the NoSnapshotPolicy and
+ * never took a snapshot!
+ */
+
+ throw new NoSnapshotException("No snapshot file(s): "
+ + journalFile);
+
+ }
+
+ System.out.println("Most recent snapshot: " + tmp);
+
+ journalFile = tmp;
+
+ }
+
if (journalFile.getName().endsWith(SnapshotManager.SNAPSHOT_EXT)) {
+ /*
+ * File is a snapshot.
+ *
+ * Decompress the snapshot onto a temporary file in the current
+ * working directory.
+ */
+
// source is the snapshot.
final File in = journalFile;
@@ -541,6 +599,12 @@
}
+ private static void usage(final String[] args) {
+
+ System.err.println("usage: (-l|-h commitPoint) <journalFile> haLogDir");
+
+ }
+
/**
* Verify that the HALog root block is consistent with the Journal's root
* block.
@@ -578,10 +642,4 @@
}
- private static void usage(final String[] args) {
-
- System.err.println("usage: (-l|-h commitPoint) <journalFile> haLogDir");
-
- }
-
}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/NoSnapshotException.java 2014-01-31 18:37:34 UTC (rev 7838)
@@ -0,0 +1,55 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.journal.jini.ha;
+
+import java.io.IOException;
+
+/**
+ * An instance of this exception is thrown if the {@link HARestore} class is
+ * unable to locate a snapshot file. This can happen if you specify the wrong
+ * directory. It can also happen if you are using the {@link NoSnapshotPolicy}
+ * and never took a snapshot!
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class NoSnapshotException extends IOException {
+
+ private static final long serialVersionUID = 1L;
+
+ public NoSnapshotException() {
+ super();
+ }
+
+ public NoSnapshotException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public NoSnapshotException(String message) {
+ super(message);
+ }
+
+ public NoSnapshotException(Throwable cause) {
+ super(cause);
+ }
+}
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-03-04 20:46:25
|
Revision: 7911
http://sourceforge.net/p/bigdata/code/7911
Author: thompsonbry
Date: 2014-03-04 20:46:21 +0000 (Tue, 04 Mar 2014)
Log Message:
-----------
Committing to CI for #730. I still need to test the benchmarking scripts and scale-out query.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConfigParams.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml
branches/BIGDATA_RELEASE_1_3_0/build.xml
branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config
branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4jHA.properties
branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/startHAServices.config
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/RWStore.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/classes/
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/classes/log4j.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/jetty.xml
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/index.html
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/result-to-html.xsl
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WebAppUnassembled.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/result-to-html.xsl
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/resources/RWStore.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/resources/WEB-INF/
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/resources/log4j.properties
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-03-04 20:38:39 UTC (rev 7910)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-03-04 20:46:21 UTC (rev 7911)
@@ -60,9 +60,6 @@
private static fedname = "benchmark";
- // NanoSparqlServer (http) port.
- private static nssPort = 8090;
-
// write replication pipeline port (listener).
private static haPort = 9090;
@@ -276,20 +273,3 @@
}, bigdata.kb);
}
-
-/*
- * NanoSparqlServer configuration.
- */
-com.bigdata.rdf.sail.webapp.NanoSparqlServer {
-
- namespace = bigdata.namespace;
-
- create = true;
-
- queryThreadPoolSize = 16;
-
- describeEachNamedGraph = true;
-
- port = bigdata.nssPort;
-
-}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-03-04 20:38:39 UTC (rev 7910)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-03-04 20:46:21 UTC (rev 7911)
@@ -275,20 +275,3 @@
}, bigdata.kb);
}
-
-/*
- * NanoSparqlServer configuration.
- */
-com.bigdata.rdf.sail.webapp.NanoSparqlServer {
-
- namespace = bigdata.namespace;
-
- create = true;
-
- queryThreadPoolSize = 16;
-
- describeEachNamedGraph = true;
-
- port = bigdata.nssPort;
-
-}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-03-04 20:38:39 UTC (rev 7910)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-03-04 20:46:21 UTC (rev 7911)
@@ -60,9 +60,6 @@
private static fedname = "benchmark";
- // NanoSparqlServer (http) port.
- private static nssPort = ConfigMath.add(8090,2);
-
// write replication pipeline port (listener).
private static haPort = ConfigMath.add(9090,2);
@@ -275,20 +272,3 @@
}, bigdata.kb);
}
-
-/*
- * NanoSparqlServer configuration.
- */
-com.bigdata.rdf.sail.webapp.NanoSparqlServer {
-
- namespace = bigdata.namespace;
-
- create = true;
-
- queryThreadPoolSize = 16;
-
- describeEachNamedGraph = true;
-
- port = bigdata.nssPort;
-
-}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2014-03-04 20:38:39 UTC (rev 7910)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2014-03-04 20:46:21 UTC (rev 7911)
@@ -93,7 +93,6 @@
import com.bigdata.journal.ITx;
import com.bigdata.journal.Journal;
import com.bigdata.journal.jini.ha.HAJournalServer.HAQuorumService;
-import com.bigdata.journal.jini.ha.HAJournalServer.NSSConfigurationOptions;
import com.bigdata.journal.jini.ha.HAJournalServer.RunStateEnum;
import com.bigdata.quorum.Quorum;
import com.bigdata.resources.StoreManager.IStoreManagerCounters;
@@ -2193,26 +2192,43 @@
* Misc.
*/
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Note: The actual port depends on how jetty was configured in
+ * <code>jetty.xml</code>. This returns the port associated with the
+ * first jetty connection.
+ *
+ * @see <a
+ * href="http://wiki.eclipse.org/Jetty/Tutorial/Embedding_Jetty">
+ * Embedding Jetty </a>
+ */
@Override
public int getNSSPort() {
- final String COMPONENT = NSSConfigurationOptions.COMPONENT;
+ return server.getNSSPort();
- try {
-
- final Integer port = (Integer) server.config.getEntry(
- COMPONENT, NSSConfigurationOptions.PORT, Integer.TYPE,
- NSSConfigurationOptions.DEFAULT_PORT);
-
- return port;
-
- } catch (ConfigurationException e) {
-
- throw new RuntimeException(e);
-
- }
-
}
+// @Override
+// public int getNSSPort() {
+//
+// final String COMPONENT = NSSConfigurationOptions.COMPONENT;
+//
+// try {
+//
+// final Integer port = (Integer) server.config.getEntry(
+// COMPONENT, NSSConfigurationOptions.PORT, Integer.TYPE,
+// NSSConfigurationOptions.DEFAULT_PORT);
+//
+// return port;
+//
+// } catch (ConfigurationException e) {
+//
+// throw new RuntimeException(e);
+//
+// }
+//
+// }
@Override
public RunState getRunState() {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-03-04 20:38:39 UTC (rev 7910)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-03-04 20:46:21 UTC (rev 7911)
@@ -33,9 +33,7 @@
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.rmi.Remote;
-import java.util.LinkedHashMap;
import java.util.List;
-import java.util.Map;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
@@ -59,6 +57,7 @@
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.ACL;
import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.webapp.WebAppContext;
import com.bigdata.concurrent.FutureTaskMon;
import com.bigdata.ha.HAGlue;
@@ -440,28 +439,60 @@
*/
boolean DEFAULT_ONLINE_DISASTER_RECOVERY = false;
- }
-
- /**
- * Configuration options for the {@link NanoSparqlServer}.
- */
- public interface NSSConfigurationOptions extends ConfigParams {
-
- String COMPONENT = NanoSparqlServer.class.getName();
-
/**
- * The port at which the embedded {@link NanoSparqlServer} will respond
- * to HTTP requests (default {@value #DEFAULT_PORT}). This MAY be ZERO
- * (0) to use a random open port.
+ * The location of the <code>jetty.xml</code> file that will be used to
+ * configure jetty (default {@value #DEFAULT_JETTY_XML}).
*
- * TODO We should be able to specify the interface, not just the port. Is
- * there any way to do that with jetty?
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/730" >
+ * Allow configuration of embedded NSS jetty server using
+ * jetty-web.xml </a>
+ *
+ * @see #DEFAULT_JETTY_XML
*/
- String PORT = "port";
+ String JETTY_XML = "jettyXml";
- int DEFAULT_PORT = 8080;
-
+ /**
+ * The default value works when deployed under the IDE with the
+ * <code>bigdata-war/src</code> directory on the classpath. When
+ * deploying outside of that context, the value needs to be set
+ * explicitly.
+ */
+ String DEFAULT_JETTY_XML = "WEB-INF/jetty.xml";
+
}
+
+// /**
+// * Configuration options for the {@link NanoSparqlServer}.
+// *
+// * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/730" >
+// * Allow configuration of embedded NSS jetty server using jetty-web.xml
+// * </a>
+// */
+// @Deprecated
+// public interface NSSConfigurationOptions extends ConfigParams {
+//
+// @Deprecated
+// String COMPONENT = NanoSparqlServer.class.getName();
+//
+// /**
+// * The port at which the embedded {@link NanoSparqlServer} will respond
+// * to HTTP requests (default {@value #DEFAULT_PORT}). This MAY be ZERO
+// * (0) to use a random open port.
+// *
+// * @deprecated This has been replaced by the use of <code>web.xml</code>
+// * and <code>jetty.xml</code>.
+// *
+// * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/730" >
+// * Allow configuration of embedded NSS jetty server using
+// * jetty-web.xml </a>
+// */
+// @Deprecated
+// String PORT = "port";
+//
+// @Deprecated
+// int DEFAULT_PORT = 8080;
+//
+// }
/**
* The journal.
@@ -4466,65 +4497,85 @@
* Note: We need to wait for a quorum meet since this will create the KB
* instance if it does not exist and we can not write on the
* {@link HAJournal} until we have a quorum meet.
+ *
+ * @see <a href="http://wiki.eclipse.org/Jetty/Tutorial/Embedding_Jetty">
+ * Embedding Jetty </a>
+ * @see <a href="http://trac.bigdata.com/ticket/730" > Allow configuration
+ * of embedded NSS jetty server using jetty-web.xml </a>
*/
private void startNSS() {
try {
- final String COMPONENT = NSSConfigurationOptions.COMPONENT;
+ if (jettyServer != null && jettyServer.isRunning()) {
- final String namespace = (String) config.getEntry(COMPONENT,
- NSSConfigurationOptions.NAMESPACE, String.class,
- NSSConfigurationOptions.DEFAULT_NAMESPACE);
+ throw new RuntimeException("Already running");
- final Integer queryPoolThreadSize = (Integer) config.getEntry(
- COMPONENT, NSSConfigurationOptions.QUERY_THREAD_POOL_SIZE,
- Integer.TYPE,
- NSSConfigurationOptions.DEFAULT_QUERY_THREAD_POOL_SIZE);
-
- final boolean create = (Boolean) config.getEntry(COMPONENT,
- NSSConfigurationOptions.CREATE, Boolean.TYPE,
- NSSConfigurationOptions.DEFAULT_CREATE);
-
- final Integer port = (Integer) config.getEntry(COMPONENT,
- NSSConfigurationOptions.PORT, Integer.TYPE,
- NSSConfigurationOptions.DEFAULT_PORT);
-
- final String servletContextListenerClass = (String) config
- .getEntry(
- COMPONENT,
- NSSConfigurationOptions.SERVLET_CONTEXT_LISTENER_CLASS,
- String.class,
- NSSConfigurationOptions.DEFAULT_SERVLET_CONTEXT_LISTENER_CLASS);
-
- log.warn("Starting NSS: port=" + port);
-
- final Map<String, String> initParams = new LinkedHashMap<String, String>();
- {
-
- initParams.put(ConfigParams.NAMESPACE, namespace);
-
- initParams.put(ConfigParams.QUERY_THREAD_POOL_SIZE,
- queryPoolThreadSize.toString());
-
- // Note: Create will be handled by the QuorumListener (above).
- initParams.put(ConfigParams.CREATE, Boolean.toString(create));
-
- initParams.put(ConfigParams.SERVLET_CONTEXT_LISTENER_CLASS,
- servletContextListenerClass);
-
}
- if (jettyServer != null && jettyServer.isRunning()) {
+// if(!USE_WEB_XML) {
+//
+// final String COMPONENT = NSSConfigurationOptions.COMPONENT;
+//
+// final String namespace = (String) config.getEntry(COMPONENT,
+// NSSConfigurationOptions.NAMESPACE, String.class,
+// NSSConfigurationOptions.DEFAULT_NAMESPACE);
+//
+// final Integer queryPoolThreadSize = (Integer) config.getEntry(
+// COMPONENT, NSSConfigurationOptions.QUERY_THREAD_POOL_SIZE,
+// Integer.TYPE,
+// NSSConfigurationOptions.DEFAULT_QUERY_THREAD_POOL_SIZE);
+//
+// final boolean create = (Boolean) config.getEntry(COMPONENT,
+// NSSConfigurationOptions.CREATE, Boolean.TYPE,
+// NSSConfigurationOptions.DEFAULT_CREATE);
+//
+// final Integer port = (Integer) config.getEntry(COMPONENT,
+// NSSConfigurationOptions.PORT, Integer.TYPE,
+// NSSConfigurationOptions.DEFAULT_PORT);
+//
+// final String servletContextListenerClass = (String) config
+// .getEntry(
+// COMPONENT,
+// NSSConfigurationOptions.SERVLET_CONTEXT_LISTENER_CLASS,
+// String.class,
+// NSSConfigurationOptions.DEFAULT_SERVLET_CONTEXT_LISTENER_CLASS);
+//
+// final Map<String, String> initParams = new LinkedHashMap<String, String>();
+// {
+//
+// initParams.put(ConfigParams.NAMESPACE, namespace);
+//
+// initParams.put(ConfigParams.QUERY_THREAD_POOL_SIZE,
+// queryPoolThreadSize.toString());
+//
+// // Note: Create will be handled by the QuorumListener (above).
+// initParams.put(ConfigParams.CREATE, Boolean.toString(create));
+//
+// initParams.put(ConfigParams.SERVLET_CONTEXT_LISTENER_CLASS,
+// servletContextListenerClass);
+//
+// }
+//
+// // Setup the embedded jetty server for NSS webapp.
+// jettyServer = NanoSparqlServer.newInstance(port, journal,
+// initParams);
+//
+// } else {
- throw new RuntimeException("Already running");
+ // The location of the jetty.xml file.
+ final String jettyXml = (String) config.getEntry(
+ ConfigurationOptions.COMPONENT,
+ ConfigurationOptions.JETTY_XML, String.class,
+ ConfigurationOptions.DEFAULT_JETTY_XML);
- }
+ // Setup the embedded jetty server for NSS webapp.
+ jettyServer = NanoSparqlServer.newInstance(jettyXml, journal);
- // Setup the embedded jetty server for NSS webapp.
- jettyServer = NanoSparqlServer.newInstance(port, journal,
- initParams);
+// }
+ log.warn("Starting NSS");
+
// Start the server.
jettyServer.start();
@@ -4539,8 +4590,9 @@
final String serviceURL;
{
- final int actualPort = jettyServer.getConnectors()[0]
- .getLocalPort();
+ final int actualPort = getNSSPort();
+// final int actualPort = jettyServer.getConnectors()[0]
+// .getLocalPort();
String hostAddr = NicUtil.getIpAddress("default.nic",
"default", true/* loopbackOk */);
@@ -4560,7 +4612,7 @@
System.out.println(msg);
if (log.isInfoEnabled())
- log.info(msg);
+ log.warn(msg);
}
@@ -4573,10 +4625,49 @@
}
+// /**
+// * When <code>true</code>, the {@link HAJournalServer} will use
+// * <code>jetty.xml</code> and <code>web.xml</code> to configure the
+// * {@link NanoSparqlServer}.
+// *
+// * @see <a href="http://wiki.eclipse.org/Jetty/Tutorial/Embedding_Jetty">
+// * Embedding Jetty </a>
+// * @see <a href="http://trac.bigdata.com/ticket/730" > Allow configuration
+// * of embedded NSS jetty server using jetty-web.xml </a>
+// *
+// * @deprecated Once #730 is closed, get rid of this and the old code paths
+// * in the method above and in the {@link NanoSparqlServer}.
+// */
+// private final boolean USE_WEB_XML = true;
+
/**
- * Conditionally create the default KB instance as identified by the
- * {@link NSSConfigurationOptions}.
+ * The actual port depends on how jetty was configured in
+ * <code>jetty.xml</code>. This returns the port associated with the first
+ * connection for the jetty {@link Server}.
*
+ * @return The port associated with the first connection for the jetty
+ * {@link Server}.
+ *
+ * @throws IllegalArgumentException
+ * if the jetty {@link Server} is not running.
+ */
+ int getNSSPort() {
+
+ final Server tmp = jettyServer;
+
+ if (tmp == null)
+ throw new IllegalStateException("Server is not running");
+
+ return tmp.getConnectors()[0].getLocalPort();
+
+ }
+
+ /**
+ * Conditionally create the default KB instance as identified in
+ * <code>web.xml</code>.
+ *
+ * @see ConfigParams
+ *
* @throws ConfigurationException
* @throws ExecutionException
* @throws InterruptedException
@@ -4584,16 +4675,60 @@
private void conditionalCreateDefaultKB() throws ConfigurationException,
InterruptedException, ExecutionException {
- final String COMPONENT = NSSConfigurationOptions.COMPONENT;
+ final Server server = this.jettyServer;
- final String namespace = (String) config.getEntry(COMPONENT,
- NSSConfigurationOptions.NAMESPACE, String.class,
- NSSConfigurationOptions.DEFAULT_NAMESPACE);
+ if (server == null)
+ throw new IllegalStateException();
- final boolean create = (Boolean) config.getEntry(COMPONENT,
- NSSConfigurationOptions.CREATE, Boolean.TYPE,
- NSSConfigurationOptions.DEFAULT_CREATE);
+ /*
+ * TODO This currently relies on the WebAppContext's initParams. This is
+ * somewhat fragile, but that is where this information is declared.
+ */
+ final WebAppContext wac = NanoSparqlServer.getWebApp(server);
+ if (wac == null)
+ throw new RuntimeException("Could not locate webapp.");
+
+ final String namespace;
+ {
+
+ String s = wac.getInitParameter(ConfigParams.NAMESPACE);
+
+ if (s == null)
+ s = ConfigParams.DEFAULT_NAMESPACE;
+
+ namespace = s;
+
+ if (log.isInfoEnabled())
+ log.info(ConfigParams.NAMESPACE + "=" + namespace);
+
+ }
+
+ final boolean create;
+ {
+
+ final String s = wac.getInitParameter(ConfigParams.CREATE);
+
+ if (s != null)
+ create = Boolean.valueOf(s);
+ else
+ create = ConfigParams.DEFAULT_CREATE;
+
+ if (log.isInfoEnabled())
+ log.info(ConfigParams.CREATE + "=" + create);
+
+ }
+
+// final String COMPONENT = NSSConfigurationOptions.COMPONENT;
+//
+// final String namespace = (String) config.getEntry(COMPONENT,
+// NSSConfigurationOptions.NAMESPACE, String.class,
+// NSSConfigurationOptions.DEFAULT_NAMESPACE);
+//
+// final boolean create = (Boolean) config.getEntry(COMPONENT,
+// NSSConfigurationOptions.CREATE, Boolean.TYPE,
+// NSSConfigurationOptions.DEFAULT_CREATE);
+
if (create) {
final Future<Void> ft = journal.getExecutorService().submit(
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-03-04 20:38:39 UTC (rev 7910)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-03-04 20:46:21 UTC (rev 7911)
@@ -1,3202 +1,3265 @@
-/**
-
-Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.journal.jini.ha;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.net.InetAddress;
-import java.rmi.Remote;
-import java.util.Arrays;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import junit.framework.AssertionFailedError;
-import net.jini.config.Configuration;
-import net.jini.config.ConfigurationException;
-import net.jini.config.ConfigurationProvider;
-import net.jini.core.lookup.ServiceID;
-import net.jini.core.lookup.ServiceItem;
-import net.jini.core.lookup.ServiceTemplate;
-import net.jini.discovery.DiscoveryEvent;
-import net.jini.discovery.DiscoveryListener;
-import net.jini.discovery.LookupDiscoveryManager;
-import net.jini.lease.LeaseRenewalManager;
-import net.jini.lookup.ServiceDiscoveryManager;
-
-import org.apache.system.SystemUtil;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.ConnectionLossException;
-import org.apache.zookeeper.KeeperException.NodeExistsException;
-import org.apache.zookeeper.KeeperException.SessionExpiredException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.ZooKeeper.States;
-import org.apache.zookeeper.data.ACL;
-
-import com.bigdata.ha.HAGlue;
-import com.bigdata.ha.HAStatusEnum;
-import com.bigdata.ha.IndexManagerCallable;
-import com.bigdata.ha.RunState;
-import com.bigdata.ha.msg.HARootBlockRequest;
-import com.bigdata.ha.msg.HASnapshotDigestRequest;
-import com.bigdata.ha.msg.IHASnapshotResponse;
-import com.bigdata.jini.start.IServiceListener;
-import com.bigdata.jini.start.config.JavaServiceConfiguration;
-import com.bigdata.jini.start.config.ServiceConfiguration;
-import com.bigdata.jini.start.config.ServiceConfiguration.AbstractServiceStarter;
-import com.bigdata.jini.start.config.ZookeeperClientConfig;
-import com.bigdata.jini.start.process.ProcessHelper;
-import com.bigdata.jini.util.ConfigMath;
-import com.bigdata.jini.util.JiniUtil;
-import com.bigdata.journal.IRootBlockView;
-import com.bigdata.journal.StoreState;
-import com.bigdata.journal.jini.ha.HAJournalServer.ConfigurationOptions;
-import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest;
-import com.bigdata.quorum.AbstractQuorumClient;
-import com.bigdata.quorum.AsynchronousQuorumCloseException;
-import com.bigdata.quorum.Quorum;
-import com.bigdata.quorum.QuorumClient;
-import com.bigdata.quorum.QuorumException;
-import com.bigdata.quorum.zk.ZKQuorumClient;
-import com.bigdata.quorum.zk.ZKQuorumImpl;
-import com.bigdata.rdf.sail.webapp.client.HttpException;
-import com.bigdata.service.jini.JiniClientConfig;
-import com.bigdata.service.jini.RemoteDestroyAdmin;
-import com.bigdata.util.InnerCause;
-import com.bigdata.util.config.NicUtil;
-import com.bigdata.zookeeper.DumpZookeeper;
-import com.bigdata.zookeeper.ZooHelper;
-
-/**
- * Class layers in support to start and stop the {@link HAJournalServer}
- * processes.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- */
-public class AbstractHA3JournalServerTestCase extends
- AbstractHAJournalServerTestCase implements DiscoveryListener {
-
- /** Quorum client used to monitor (or act on) the logical service quorum. */
- protected Quorum<HAGlue, QuorumClient<HAGlue>> quorum = null;
-
- public AbstractHA3JournalServerTestCase() {
- }
-
- public AbstractHA3JournalServerTestCase(final String name) {
- super(name);
- }
-
- /**
- * The timeout in milliseconds to await the discovery of a service if there
- * is a cache miss (default {@value #DEFAULT_CACHE_MISS_TIMEOUT}).
- */
- static final protected long cacheMissTimeout = 2000;
-
- /**
- * Implementation listens for the death of the child process and can be used
- * to decide when the child process is no longer executing.
- */
- private static class ServiceListener implements IServiceListener {
-
- private volatile HAGlue haGlue;
- private volatile ProcessHelper processHelper;
- private volatile boolean dead = false;
- private volatile int childPID = 0;
-
- public ServiceListener() {
-
- }
-
- public void setService(final HAGlue haGlue) {
-
- if (haGlue == null)
- throw new IllegalArgumentException();
-
- this.haGlue = haGlue;
- }
-
- @SuppressWarnings("unused")
- public HAGlue getHAGlue() {
-
- return haGlue;
-
- }
-
- public void add(final ProcessHelper processHelper) {
-
- if (processHelper == null)
- throw new IllegalArgumentException();
-
- this.processHelper = processHelper;
-
- }
-
- public void remove(final ProcessHelper processHelper) {
-
- if (processHelper == null)
- throw new IllegalArgumentException();
-
- if (processHelper != this.processHelper)
- throw new AssertionError();
-
- /*
- * Note: Do not clear the [processHelper] field.
- */
-
- // Mark the process as known dead.
- dead = true;
-
- }
-
- public Process...
[truncated message content] |
|
From: <tho...@us...> - 2014-03-26 01:31:04
|
Revision: 8016
http://sourceforge.net/p/bigdata/code/8016
Author: thompsonbry
Date: 2014-03-26 01:30:59 +0000 (Wed, 26 Mar 2014)
Log Message:
-----------
Bug fix for the concurrent create/drop and list of namespaces.
See #867 (NSS concurrency problem with list namespaces and create namespace)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-03-24 15:41:50 UTC (rev 8015)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-03-26 01:30:59 UTC (rev 8016)
@@ -82,6 +82,7 @@
import com.bigdata.rdf.sail.webapp.client.DefaultClientConnectionManagerFactory;
import com.bigdata.rdf.sail.webapp.client.HttpException;
import com.bigdata.rdf.sail.webapp.client.RemoteRepository;
+import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager;
import com.bigdata.util.InnerCause;
import com.bigdata.util.concurrent.DaemonThreadFactory;
@@ -551,6 +552,21 @@
}
+ protected RemoteRepositoryManager getRemoteRepositoryManager(final HAGlue haGlue)
+ throws IOException {
+
+ final String endpointURL = getNanoSparqlServerURL(haGlue);
+
+ // Client for talking to the NSS.
+ final HttpClient httpClient = new DefaultHttpClient(ccm);
+
+ final RemoteRepositoryManager repo = new RemoteRepositoryManager(endpointURL,
+ httpClient, executorService);
+
+ return repo;
+
+ }
+
/**
* Counts the #of results in a SPARQL result set.
*
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java 2014-03-26 01:30:59 UTC (rev 8016)
@@ -0,0 +1,211 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+package com.bigdata.journal.jini.ha;
+
+import java.util.Properties;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.openrdf.model.Statement;
+import org.openrdf.query.GraphQueryResult;
+
+import com.bigdata.ha.HAGlue;
+import com.bigdata.ha.HAStatusEnum;
+import com.bigdata.rdf.sail.BigdataSail;
+import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager;
+
+/**
+ * Test case for concurrent list namespace and create namespace operations.
+ * <p>
+ * Note: The underlying issue is NOT HA specific. This test SHOULD be ported
+ * to the standard NSS test suite.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency
+ * problem with list namespaces and create namespace </a>
+ */
+public class TestHANamespace extends AbstractHA3JournalServerTestCase {
+
+ public TestHANamespace() {
+ }
+
+ public TestHANamespace(String name) {
+ super(name);
+ }
+
+ /**
+ * Test case for concurrent list namespace and create namespace operations.
+ * <p>
+ * Note: The underlying issue is NOT HA specific. This test SHOULD be ported
+ * to the standard NSS test suite.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency
+ * problem with list namespaces and create namespace </a>
+ */
+ public void test_ticket_867() throws Throwable {
+
+ /*
+ * Controls the #of create/drop namespace operations. This many permits
+ * are obtained, and a permit is released each time we do a create
+ * namespace or drop namespace operation.
+ */
+ final int NPERMITS = 50;
+
+ /*
+ * Controls the #of queries that are executed in the main thread
+ * concurrent with those create/drop namespace operations.
+ */
+ final int NQUERIES = 10;
+
+ final String NAMESPACE_PREFIX = getName() + "-";
+
+ final ABC abc = new ABC(false/* simultaneous */);
+
+ // Await quorum meet.
+ final long token = quorum.awaitQuorum(awaitQuorumTimeout,
+ TimeUnit.MILLISECONDS);
+
+ // Figure out which service is the leader.
+ final HAGlue leader = quorum.getClient().getLeader(token);
+
+ // Wait until up and running as the leader.
+ awaitHAStatus(leader, HAStatusEnum.Leader);
+
+ final RemoteRepositoryManager repositoryManager = getRemoteRepositoryManager(leader);
+
+ final Semaphore awaitDone = new Semaphore(0);
+
+ final AtomicReference<Exception> failure = new AtomicReference<Exception>(null);
+
+ try {
+
+ final Thread getNamespacesThread = new Thread(new Runnable() {
+
+ @Override
+ public void run() {
+
+ try {
+
+ /*
+ * Create-delete namespaces with incrementing number in
+ * name.
+ */
+ int n = 0;
+ while (true) {
+
+ final String namespace = NAMESPACE_PREFIX + n;
+
+ final Properties props = new Properties();
+
+ props.put(BigdataSail.Options.NAMESPACE,
+ namespace);
+
+ if (log.isInfoEnabled())
+ log.info("Creating namespace " + namespace);
+
+ repositoryManager
+ .createRepository(namespace, props);
+
+ awaitDone.release(); // release a permit.
+
+ if (n % 2 == 0) {
+
+ if (log.isInfoEnabled())
+ log.info("Removing namespace " + namespace);
+
+ repositoryManager.deleteRepository(namespace);
+
+ }
+
+ n++;
+
+ }
+
+ } catch (Exception e) {
+ failure.set(e);
+ } finally {
+ // release all permits.
+ awaitDone.release(NPERMITS);
+ }
+
+ }
+
+ });
+
+ // Start running the create/drop namespace thread.
+ getNamespacesThread.start();
+
+ try {
+ /*
+ * Run list namespace requests concurrent with the create/drop
+ * namespace requests.
+ *
+ * FIXME Martyn: The list namespace requests should be running
+ * fully asynchronously with respect to the create/drop
+ * namespace requests, not getting a new set of permits and then
+ * just running the list namespace once for those NPERMITS
+ * create/drop requests. The way this is setup is missing too
+ * many opportunities for a concurrency issue with only one list
+ * namespace request per 50 create/drop requests.
+ */
+ for (int n = 0; n < NQUERIES; n++) {
+ awaitDone.acquire(NPERMITS);
+
+ if (failure.get() != null)
+ fail("Thread failure", failure.get());
+
+ if (log.isInfoEnabled())
+ log.info("Get namespace list...");
+
+ try {
+
+ final GraphQueryResult gqres = repositoryManager
+ .getRepositoryDescriptions();
+ int count = 0;
+ while (gqres.hasNext()) {
+ final Statement st = gqres.next();
+ if (log.isInfoEnabled())
+ log.info("Statement: " + st);
+ count++;
+ }
+ log.warn("Processed " + count + " statements");
+ assertTrue(count > 0);
+ } catch (Exception e) {
+ fail("Unable to retrieve namespaces", e);
+
+ }
+ }
+ } finally {
+ getNamespacesThread.interrupt();
+ }
+
+ } finally {
+
+ // repositoryManager.shutdown();
+
+ }
+
+ }
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-03-24 15:41:50 UTC (rev 8015)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-03-26 01:30:59 UTC (rev 8016)
@@ -72,19 +72,14 @@
import org.openrdf.rio.RDFWriterRegistry;
import org.openrdf.sail.SailException;
-import com.bigdata.bop.BufferAnnotations;
-import com.bigdata.bop.IPredicate;
import com.bigdata.bop.engine.IRunningQuery;
import com.bigdata.bop.engine.QueryEngine;
-import com.bigdata.bop.join.PipelineJoin;
-import com.bigdata.btree.IndexMetadata;
import com.bigdata.counters.CAT;
import com.bigdata.io.NullOutputStream;
-import com.bigdata.journal.IBufferStrategy;
import com.bigdata.journal.IIndexManager;
+import com.bigdata.journal.ITransactionService;
import com.bigdata.journal.ITx;
import com.bigdata.journal.Journal;
-import com.bigdata.journal.RWStrategy;
import com.bigdata.journal.TimestampUtility;
import com.bigdata.rdf.changesets.IChangeLog;
import com.bigdata.rdf.changesets.IChangeRecord;
@@ -106,9 +101,7 @@
import com.bigdata.rdf.sparql.ast.QueryType;
import com.bigdata.rdf.sparql.ast.Update;
import com.bigdata.rdf.store.AbstractTripleStore;
-import com.bigdata.relation.AbstractResource;
import com.bigdata.relation.RelationSchema;
-import com.bigdata.rwstore.RWStore;
import com.bigdata.sparse.ITPS;
import com.bigdata.sparse.SparseRowStore;
import com.bigdata.util.concurrent.DaemonThreadFactory;
@@ -2208,273 +2201,163 @@
}
/**
- * Return various interesting metadata about the KB state.
+ * Return a list of the namespaces for the {@link AbstractTripleStore}s
+ * registered against the bigdata instance.
*
- * @todo The range counts can take some time if the cluster is heavily
- * loaded since they must query each shard for the primary statement
- * index and the TERM2ID index.
+ * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency
+ * problem with list namespaces and create namespace </a>
*/
- protected StringBuilder getKBInfo(final String namespace,
- final long timestamp) {
+ /*package*/ List<String> getNamespaces(final long timestamp) {
+
+ final long tx = newTx(timestamp);
+
+ try {
+
+ return getNamespaces(timestamp, tx);
+
+ } finally {
+
+ abortTx(tx);
+
+ }
- final StringBuilder sb = new StringBuilder();
+ }
- BigdataSailRepositoryConnection conn = null;
+ private List<String> getNamespaces(long timestamp, final long tx) {
- try {
+ if (timestamp == ITx.READ_COMMITTED) {
- conn = getQueryConnection(namespace, timestamp);
-
- final AbstractTripleStore tripleStore = conn.getTripleStore();
+ // Use the last commit point.
+ timestamp = getIndexManager().getLastCommitTime();
- sb.append("class\t = " + tripleStore.getClass().getName() + "\n");
+ }
- sb
- .append("indexManager\t = "
- + tripleStore.getIndexManager().getClass()
- .getName() + "\n");
+ // the triple store namespaces.
+ final List<String> namespaces = new LinkedList<String>();
- sb.append("namespace\t = " + tripleStore.getNamespace() + "\n");
+ if (log.isInfoEnabled())
+ log.info("getNamespaces for " + timestamp);
- sb.append("timestamp\t = "
- + TimestampUtility.toString(tripleStore.getTimestamp())
- + "\n");
+ final SparseRowStore grs = getIndexManager().getGlobalRowStore(
+ timestamp);
- sb.append("statementCount\t = " + tripleStore.getStatementCount()
- + "\n");
+ if (grs == null) {
- sb.append("termCount\t = " + tripleStore.getTermCount() + "\n");
+ log.warn("No GRS @ timestamp="
+ + TimestampUtility.toString(timestamp));
- sb.append("uriCount\t = " + tripleStore.getURICount() + "\n");
+ // Empty.
+ return namespaces;
- sb.append("literalCount\t = " + tripleStore.getLiteralCount() + "\n");
+ }
- /*
- * Note: The blank node count is only available when using the told
- * bnodes mode.
- */
- sb
- .append("bnodeCount\t = "
- + (tripleStore.getLexiconRelation()
- .isStoreBlankNodes() ? ""
- + tripleStore.getBNodeCount() : "N/A")
- + "\n");
+ // scan the relation schema in the global row store.
+ @SuppressWarnings("unchecked")
+ final Iterator<ITPS> itr = (Iterator<ITPS>) grs
+ .rangeIterator(RelationSchema.INSTANCE);
- sb.append(IndexMetadata.Options.BTREE_BRANCHING_FACTOR
- + "="
- + tripleStore.getSPORelation().getPrimaryIndex()
- .getIndexMetadata().getBranchingFactor() + "\n");
+ while (itr.hasNext()) {
- sb.append(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY
- + "="
- + tripleStore.getSPORelation().getPrimaryIndex()
- .getIndexMetadata()
- .getWriteRetentionQueueCapacity() + "\n");
+ // A timestamped property value set is a logical row with
+ // timestamped property values.
+ final ITPS tps = itr.next();
- sb.append("-- All properties.--\n");
-
- // get the triple store's properties from the global row store.
- final Map<String, Object> properties = getIndexManager()
- .getGlobalRowStore(timestamp).read(RelationSchema.INSTANCE,
- namespace);
+ // If you want to see what is in the TPS, uncomment this.
+ // System.err.println(tps.toString());
- // write them out,
- for (String key : properties.keySet()) {
- sb.append(key + "=" + properties.get(key)+"\n");
- }
+ // The namespace is the primary key of the logical row for the
+ // relation schema.
+ final String namespace = (String) tps.getPrimaryKey();
- /*
- * And show some properties which can be inherited from
- * AbstractResource. These have been mainly phased out in favor of
- * BOP annotations, but there are a few places where they are still
- * in use.
- */
-
- sb.append("-- Interesting AbstractResource effective properties --\n");
-
- sb.append(AbstractResource.Options.CHUNK_CAPACITY + "="
- + tripleStore.getChunkCapacity() + "\n");
+ // Get the name of the implementation class
+ // (AbstractTripleStore, SPORelation, LexiconRelation, etc.)
+ final String className = (String) tps.get(RelationSchema.CLASS)
+ .getValue();
- sb.append(AbstractResource.Options.CHUNK_OF_CHUNKS_CAPACITY + "="
- + tripleStore.getChunkOfChunksCapacity() + "\n");
+ if (className == null) {
+ // Skip deleted triple store entry.
+ continue;
+ }
- sb.append(AbstractResource.Options.CHUNK_TIMEOUT + "="
- + tripleStore.getChunkTimeout() + "\n");
+ try {
+ final Class<?> cls = Class.forName(className);
+ if (AbstractTripleStore.class.isAssignableFrom(cls)) {
+ // this is a triple store (vs something else).
+ namespaces.add(namespace);
+ }
+ } catch (ClassNotFoundException e) {
+ log.error(e, e);
+ }
- sb.append(AbstractResource.Options.FULLY_BUFFERED_READ_THRESHOLD + "="
- + tripleStore.getFullyBufferedReadThreshold() + "\n");
+ }
- sb.append(AbstractResource.Options.MAX_PARALLEL_SUBQUERIES + "="
- + tripleStore.getMaxParallelSubqueries() + "\n");
+// if (log.isInfoEnabled())
+// log.info("getNamespaces returning " + namespaces.size());
- /*
- * And show some interesting effective properties for the KB, SPO
- * relation, and lexicon relation.
- */
- sb.append("-- Interesting KB effective properties --\n");
-
- sb
- .append(AbstractTripleStore.Options.TERM_CACHE_CAPACITY
- + "="
- + tripleStore
- .getLexiconRelation()
- .getProperties()
- .getProperty(
- AbstractTripleStore.Options.TERM_CACHE_CAPACITY,
- AbstractTripleStore.Options.DEFAULT_TERM_CACHE_CAPACITY) + "\n");
+ return namespaces;
- /*
- * And show several interesting properties with their effective
- * defaults.
- */
+ }
+
+ /**
+ * Obtain a new transaction to protect operations against the specified view
+ * of the database.
+ *
+ * @param timestamp
+ * The timestamp for the desired view.
+ *
+ * @return The transaction identifier -or- <code>timestamp</code> if the
+ * {@link IIndexManager} is not a {@link Journal}.
+ *
+ * @see ITransactionService#newTx(long)
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency
+ * problem with list namespaces and create namespace </a>
+ */
+ public long newTx(final long timestamp) {
- sb.append("-- Interesting Effective BOP Annotations --\n");
+ long tx = timestamp; // use dirty reads unless Journal.
- sb.append(BufferAnnotations.CHUNK_CAPACITY
- + "="
- + tripleStore.getProperties().getProperty(
- BufferAnnotations.CHUNK_CAPACITY,
- "" + BufferAnnotations.DEFAULT_CHUNK_CAPACITY)
- + "\n");
+ if (getIndexManager() instanceof Journal) {
+ final ITransactionService txs = ((Journal) getIndexManager())
+ .getLocalTransactionManager().getTransactionService();
- sb
- .append(BufferAnnotations.CHUNK_OF_CHUNKS_CAPACITY
- + "="
- + tripleStore
- .getProperties()
- .getProperty(
- BufferAnnotations.CHUNK_OF_CHUNKS_CAPACITY,
- ""
- + BufferAnnotations.DEFAULT_CHUNK_OF_CHUNKS_CAPACITY)
- + "\n");
-
- sb.append(BufferAnnotations.CHUNK_TIMEOUT
- + "="
- + tripleStore.getProperties().getProperty(
- BufferAnnotations.CHUNK_TIMEOUT,
- "" + BufferAnnotations.DEFAULT_CHUNK_TIMEOUT)
- + "\n");
-
- sb.append(PipelineJoin.Annotations.MAX_PARALLEL_CHUNKS
- + "="
- + tripleStore.getProperties().getProperty(
- PipelineJoin.Annotations.MAX_PARALLEL_CHUNKS,
- "" + PipelineJoin.Annotations.DEFAULT_MAX_PARALLEL_CHUNKS) + "\n");
-
- sb
- .append(IPredicate.Annotations.FULLY_BUFFERED_READ_THRESHOLD
- + "="
- + tripleStore
- .getProperties()
- .getProperty(
- IPredicate.Annotations.FULLY_BUFFERED_READ_THRESHOLD,
- ""
- + IPredicate.Annotations.DEFAULT_FULLY_BUFFERED_READ_THRESHOLD)
- + "\n");
-
- // sb.append(tripleStore.predicateUsage());
-
- if (tripleStore.getIndexManager() instanceof Journal) {
-
- final Journal journal = (Journal) tripleStore.getIndexManager();
-
- final IBufferStrategy strategy = journal.getBufferStrategy();
-
- if (strategy instanceof RWStrategy) {
-
- final RWStore store = ((RWStrategy) strategy).getStore();
-
- store.showAllocators(sb);
-
- }
-
+ try {
+ tx = txs.newTx(timestamp);
+ } catch (IOException e) {
+ // Note: Local operation. Will not throw IOException.
+ throw new RuntimeException(e);
}
- } catch (Throwable t) {
-
- log.warn(t.getMessage(), t);
-
- } finally {
-
- if(conn != null) {
- try {
- conn.close();
- } catch (RepositoryException e) {
- log.error(e, e);
- }
-
- }
-
}
- return sb;
-
+ return tx;
}
- /**
- * Return a list of the namespaces for the {@link AbstractTripleStore}s
- * registered against the bigdata instance.
- */
- /*package*/ List<String> getNamespaces(final long timestamp) {
-
- // the triple store namespaces.
- final List<String> namespaces = new LinkedList<String>();
+ /**
+ * Abort a transaction obtained by {@link #newTx(long)}.
+ *
+ * @param tx
+ * The transaction identifier.
+ */
+ public void abortTx(final long tx) {
+ if (getIndexManager() instanceof Journal) {
+// if (!TimestampUtility.isReadWriteTx(tx)) {
+// // Not a transaction.
+// throw new IllegalStateException();
+// }
- final SparseRowStore grs = getIndexManager().getGlobalRowStore(
- timestamp);
+ final ITransactionService txs = ((Journal) getIndexManager())
+ .getLocalTransactionManager().getTransactionService();
- if (grs == null) {
+ try {
+ txs.abort(tx);
+ } catch (IOException e) {
+ // Note: Local operation. Will not throw IOException.
+ throw new RuntimeException(e);
+ }
- log.warn("No GRS @ timestamp="
- + TimestampUtility.toString(timestamp));
+ }
- // Empty.
- return namespaces;
-
- }
-
- // scan the relation schema in the global row store.
- @SuppressWarnings("unchecked")
- final Iterator<ITPS> itr = (Iterator<ITPS>) grs
- .rangeIterator(RelationSchema.INSTANCE);
-
- while (itr.hasNext()) {
-
- // A timestamped property value set is a logical row with
- // timestamped property values.
- final ITPS tps = itr.next();
-
- // If you want to see what is in the TPS, uncomment this.
-// System.err.println(tps.toString());
-
- // The namespace is the primary key of the logical row for the
- // relation schema.
- final String namespace = (String) tps.getPrimaryKey();
-
- // Get the name of the implementation class
- // (AbstractTripleStore, SPORelation, LexiconRelation, etc.)
- final String className = (String) tps.get(RelationSchema.CLASS)
- .getValue();
-
- if (className == null) {
- // Skip deleted triple store entry.
- continue;
- }
-
- try {
- final Class<?> cls = Class.forName(className);
- if (AbstractTripleStore.class.isAssignableFrom(cls)) {
- // this is a triple store (vs something else).
- namespaces.add(namespace);
- }
- } catch (ClassNotFoundException e) {
- log.error(e,e);
- }
-
- }
-
- return namespaces;
-
- }
-
+ }
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java 2014-03-24 15:41:50 UTC (rev 8015)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java 2014-03-26 01:30:59 UTC (rev 8016)
@@ -343,17 +343,6 @@
// context.setAttribute(BigdataServlet.ATTRIBUTE_SPARQL_CACHE,
// new SparqlCache(new MemoryManager(DirectBufferPool.INSTANCE)));
- if (log.isInfoEnabled()) {
- /*
- * Log some information about the default kb (#of statements, etc).
- */
- final long effectiveTimestamp = config.timestamp == ITx.READ_COMMITTED ? indexManager
- .getLastCommitTime() : config.timestamp;
- log.info("\n"
- + rdfContext
- .getKBInfo(config.namespace, effectiveTimestamp));
- }
-
{
final boolean forceOverflow = Boolean.valueOf(context
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-03-24 15:41:50 UTC (rev 8015)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-03-26 01:30:59 UTC (rev 8016)
@@ -459,30 +459,47 @@
* @throws IOException
*/
private void doShowProperties(final HttpServletRequest req,
- final HttpServletResponse resp) throws IOException {
+ final HttpServletResponse resp) throws IOException {
- final String namespace = getNamespace(req);
+ final String namespace = getNamespace(req);
- final long timestamp = getTimestamp(req);
+ long timestamp = getTimestamp(req);
- final AbstractTripleStore tripleStore = getBigdataRDFContext()
- .getTripleStore(namespace, timestamp);
+ if (timestamp == ITx.READ_COMMITTED) {
- if (tripleStore == null) {
- /*
- * There is no such triple/quad store instance.
- */
- buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN);
- return;
- }
+ // Use the last commit point.
+ timestamp = getIndexManager().getLastCommitTime();
- final Properties properties = PropertyUtil.flatCopy(tripleStore
- .getProperties());
+ }
- sendProperties(req, resp, properties);
-
- }
+ final long tx = getBigdataRDFContext().newTx(timestamp);
+
+ try {
+
+ final AbstractTripleStore tripleStore = getBigdataRDFContext()
+ .getTripleStore(namespace, timestamp);
+ if (tripleStore == null) {
+ /*
+ * There is no such triple/quad store instance.
+ */
+ buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN);
+ return;
+ }
+
+ final Properties properties = PropertyUtil.flatCopy(tripleStore
+ .getProperties());
+
+ sendProperties(req, resp, properties);
+
+ } finally {
+
+ getBigdataRDFContext().abortTx(tx);
+
+ }
+
+ }
+
/**
* Generate a VoID Description for the known namespaces.
*/
@@ -498,51 +515,66 @@
}
- /*
- * The set of registered namespaces for KBs.
+ /**
+ * Protect the entire operation with a transaction, including the
+ * describe of each namespace that we discover.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency
+ * problem with list namespaces and create namespace </a>
*/
- final List<String> namespaces = getBigdataRDFContext()
- .getNamespaces(timestamp);
+ final long tx = getBigdataRDFContext().newTx(timestamp);
+
+ try {
+ /*
+ * The set of registered namespaces for KBs.
+ */
+ final List<String> namespaces = getBigdataRDFContext()
+ .getNamespaces(timestamp);
- fi...
[truncated message content] |
|
From: <mrp...@us...> - 2014-04-02 16:13:06
|
Revision: 8028
http://sourceforge.net/p/bigdata/code/8028
Author: mrpersonick
Date: 2014-04-02 16:13:03 +0000 (Wed, 02 Apr 2014)
Log Message:
-----------
fixing ticket 872 - added a magic predicate to full text search for range count
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/FullTextIndex.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchOptimizer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchServiceFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/FullTextIndex.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2014-04-02 13:14:09 UTC (rev 8027)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2014-04-02 16:13:03 UTC (rev 8028)
@@ -955,35 +955,137 @@
}
+ /**
+ * Perform a range count on a full text query.
+ */
public int count(final FullTextQuery query) {
- final Hit[] a = _search(query);
+ if (cache.containsKey(query)) {
+
+ if (log.isInfoEnabled())
+ log.info("found hits in cache");
+
+ return cache.get(query).length;
+
+ } else {
+
+ if (log.isInfoEnabled())
+ log.info("did not find hits in cache");
+
+ }
+
+ // tokenize the query.
+ final TermFrequencyData<V> qdata = tokenize(query);
+
+ // No terms after stopword extraction
+ if (qdata == null) {
+
+ cache.put(query, new Hit[] {});
+
+ return 0;
+
+ }
+
+ /*
+ * We can run an optimized version of this (just a quick range count)
+ * but only if the caller does not care about exact match and has
+ * not specified a regex.
+ */
+ if (qdata.distinctTermCount() == 1 &&
+ !query.isMatchExact() && query.getMatchRegex() == null) {
+
+ final boolean prefixMatch = query.isPrefixMatch();
+
+ final Map.Entry<String, ITermMetadata> e = qdata.getSingletonEntry();
+
+ final String termText = e.getKey();
+
+ final ITermMetadata md = e.getValue();
+
+ final CountIndexTask<V> task1 = new CountIndexTask<V>(termText, 0, 1,
+ prefixMatch, md.getLocalTermWeight(), this);
+
+ return (int) task1.getRangeCount();
+
+ } else {
+
+ final Hit<V>[] a = _search(query);
+
+ return a.length;
+
+ }
- return a.length;
-
}
- public Hit<V>[] _search(final FullTextQuery q) {
+ protected TermFrequencyData<V> tokenize(final FullTextQuery query) {
- final String query = q.getQuery();
- final String languageCode = q.getLanguageCode();
- final boolean prefixMatch = q.isPrefixMatch();
- final double minCosine = q.getMinCosine();
- final double maxCosine = q.getMaxCosine();
- final int minRank = q.getMinRank();
- final int maxRank = q.getMaxRank();
- final boolean matchAllTerms = q.isMatchAllTerms();
- final boolean matchExact = q.isMatchExact();
- final String regex = q.getMatchRegex();
- long timeout = q.getTimeout();
- final TimeUnit unit = q.getTimeUnit();
+ final String q = query.getQuery();
+ final String languageCode = query.getLanguageCode();
+ final boolean prefixMatch = query.isPrefixMatch();
+ // tokenize the query.
+ final TermFrequencyData<V> qdata;
+ {
+
+ final TokenBuffer<V> buffer = new TokenBuffer<V>(1, this);
+
+ /*
+ * If we are using prefix match ('*' operator) then we don't want to
+ * filter stopwords from the search query.
+ */
+ final boolean filterStopwords = !prefixMatch;
+
+ index(buffer, //
+ null, // docId // was Long.MIN_VALUE
+ Integer.MIN_VALUE, // fieldId
+ languageCode,//
+ new StringReader(q), //
+ filterStopwords//
+ );
+
+ if (buffer.size() == 0) {
+
+ /*
+ * There were no terms after stopword extraction.
+ */
+
+ log.warn("No terms after stopword extraction: query=" + query);
+
+ return null;
+
+ }
+
+ qdata = buffer.get(0);
+
+ qdata.normalize();
+
+ }
+
+ return qdata;
+
+ }
+
+ public Hit<V>[] _search(final FullTextQuery query) {
+
+ final String queryStr = query.getQuery();
+ final String languageCode = query.getLanguageCode();
+ final boolean prefixMatch = query.isPrefixMatch();
+ final double minCosine = query.getMinCosine();
+ final double maxCosine = query.getMaxCosine();
+ final int minRank = query.getMinRank();
+ final int maxRank = query.getMaxRank();
+ final boolean matchAllTerms = query.isMatchAllTerms();
+ final boolean matchExact = query.isMatchExact();
+ final String regex = query.getMatchRegex();
+ long timeout = query.getTimeout();
+ final TimeUnit unit = query.getTimeUnit();
+
final long begin = System.currentTimeMillis();
// if (languageCode == null)
// throw new IllegalArgumentException();
- if (query == null)
+ if (queryStr == null)
throw new IllegalArgumentException();
if (minCosine < 0d || minCosine > 1d)
@@ -1002,7 +1104,7 @@
throw new IllegalArgumentException();
if (log.isInfoEnabled())
- log.info("languageCode=[" + languageCode + "], text=[" + query
+ log.info("languageCode=[" + languageCode + "], text=[" + queryStr
+ "], minCosine=" + minCosine
+ ", maxCosine=" + maxCosine
+ ", minRank=" + minRank
@@ -1018,7 +1120,7 @@
}
- final FullTextQuery cacheKey = q;
+ final FullTextQuery cacheKey = query;
Hit<V>[] a;
@@ -1034,145 +1136,24 @@
if (log.isInfoEnabled())
log.info("did not find hits in cache");
- // tokenize the query.
- final TermFrequencyData<V> qdata;
- {
-
- final TokenBuffer<V> buffer = new TokenBuffer<V>(1, this);
-
- /*
- * If we are using prefix match ('*' operator) then we don't want to
- * filter stopwords from the search query.
- */
- final boolean filterStopwords = !prefixMatch;
-
- index(buffer, //
- null, // docId // was Long.MIN_VALUE
- Integer.MIN_VALUE, // fieldId
- languageCode,//
- new StringReader(query), //
- filterStopwords//
- );
-
- if (buffer.size() == 0) {
-
- /*
- * There were no terms after stopword extration.
- */
-
- log.warn("No terms after stopword extraction: query=" + query);
-
- a = new Hit[] {};
-
- cache.put(cacheKey, a);
-
- return a;
-
- }
-
- qdata = buffer.get(0);
-
- qdata.normalize();
-
- }
-
- final IHitCollector<V> hits;
-
- if (qdata.distinctTermCount() == 1) {
-
- final Map.Entry<String, ITermMetadata> e = qdata.getSingletonEntry();
-
- final String termText = e.getKey();
+ // tokenize the query.
+ final TermFrequencyData<V> qdata = tokenize(query);
+
+ // No terms after stopword extraction
+ if (qdata == null) {
- final ITermMetadata md = e.getValue();
-
- final CountIndexTask<V> task1 = new CountIndexTask<V>(termText, 0, 1, prefixMatch, md
- .getLocalTermWeight(), this);
-
- hits = new SingleTokenHitCollector<V>(task1);
-
- } else {
-
- final List<CountIndexTask<V>> tasks = new ArrayList<CountIndexTask<V>>(
- qdata.distinctTermCount());
-
- int i = 0;
- for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) {
-
- final String termText = e.getKey();
-
- final ITermMetadata md = e.getValue();
-
- tasks.add(new CountIndexTask<V>(termText, i++, qdata.terms.size(), prefixMatch, md
- .getLocalTermWeight(), this));
-
- }
-
- hits = new MultiTokenHitCollector<V>(tasks);
-
- }
-
- // run the queries.
- {
-
- final List<Callable<Object>> tasks = new ArrayList<Callable<Object>>(
- qdata.distinctTermCount());
-
- int i = 0;
- for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) {
-
- final String termText = e.getKey();
-
- final ITermMetadata md = e.getValue();
-
- tasks.add(new ReadIndexTask<V>(termText, i++, qdata.terms.size(),
- prefixMatch, md.getLocalTermWeight(), this, hits));
-
- }
-
- final ExecutionHelper<Object> executionHelper = new ExecutionHelper<Object>(
- getExecutorService(), timeout, unit);
-
- try {
-
- final long start = System.currentTimeMillis();
-
- executionHelper.submitTasks(tasks);
-
- if (log.isInfoEnabled()) {
- final long readTime = System.currentTimeMillis() - start;
- log.info("read time: " + readTime);
- }
-
- } catch (InterruptedException ex) {
-
- if (log.isInfoEnabled()) {
- // TODO Should we wrap and toss this interrupt instead?
- log.info("Interrupted - only partial results will be returned.");
- }
-
- /*
- * Yes, let's toss it. We were getting into a situation
- * where the ExecutionHelper above received an interrupt
- * but we still went through the heavy-weight filtering
- * operations below (matchExact or matchRegex).
- */
- throw new RuntimeException(ex);
-
- } catch (ExecutionException ex) {
-
- throw new RuntimeException(ex);
-
- }
-
- }
-
- a = hits.getHits();
-
+ cache.put(cacheKey, a = new Hit[] {});
+
+ return a;
+
+ }
+
+ a = executeQuery(qdata, prefixMatch, timeout, unit);
+
if (a.length == 0) {
log.info("No hits: languageCode=[" + languageCode + "], query=["
- + query + "]");
+ + queryStr + "]");
cache.put(cacheKey, a);
@@ -1223,14 +1204,14 @@
*/
if (matchExact) {
- a = matchExact(a, query);
+ a = matchExact(a, queryStr);
}
if (a.length == 0) {
log.warn("No hits after matchAllTerms pruning: languageCode=[" + languageCode + "], query=["
- + query + "]");
+ + queryStr + "]");
cache.put(cacheKey, a);
@@ -1260,7 +1241,7 @@
if (a.length == 0) {
log.warn("No hits after regex pruning: languageCode=[" + languageCode + "], query=["
- + query + "], regex=[" + regex + "]");
+ + queryStr + "], regex=[" + regex + "]");
cache.put(cacheKey, a);
@@ -1299,6 +1280,27 @@
}
+ /*
+ * Take a slice of the hits based on min/max cosine and min/max rank.
+ */
+ a = slice(query, a);
+
+ final long elapsed = System.currentTimeMillis() - begin;
+
+ if (log.isInfoEnabled())
+ log.info("Done: " + a.length + " hits in " + elapsed + "ms");
+
+ return a;
+
+ }
+
+ protected Hit<V>[] slice(final FullTextQuery query, Hit<V>[] a) {
+
+ final double minCosine = query.getMinCosine();
+ final double maxCosine = query.getMaxCosine();
+ final int minRank = query.getMinRank();
+ final int maxRank = query.getMaxRank();
+
// if (log.isDebugEnabled()) {
// log.debug("before min/max cosine/rank pruning:");
// for (Hit<V> h : a)
@@ -1422,13 +1424,106 @@
}
- final long elapsed = System.currentTimeMillis() - begin;
+ return a;
- if (log.isInfoEnabled())
- log.info("Done: " + a.length + " hits in " + elapsed + "ms");
+ }
+
+ protected Hit<V>[] executeQuery(final TermFrequencyData<V> qdata,
+ final boolean prefixMatch, final long timeout, final TimeUnit unit) {
+
+ final IHitCollector<V> hits;
+
+ if (qdata.distinctTermCount() == 1) {
+
+ final Map.Entry<String, ITermMetadata> e = qdata.getSingletonEntry();
+
+ final String termText = e.getKey();
+
+ final ITermMetadata md = e.getValue();
- return a;
+ final CountIndexTask<V> task1 = new CountIndexTask<V>(termText, 0, 1,
+ prefixMatch, md.getLocalTermWeight(), this);
+
+ hits = new SingleTokenHitCollector<V>(task1);
+
+ } else {
+
+ final List<CountIndexTask<V>> tasks = new ArrayList<CountIndexTask<V>>(
+ qdata.distinctTermCount());
+
+ int i = 0;
+ for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) {
+
+ final String termText = e.getKey();
+
+ final ITermMetadata md = e.getValue();
+
+ tasks.add(new CountIndexTask<V>(termText, i++, qdata.terms.size(),
+ prefixMatch, md.getLocalTermWeight(), this));
+
+ }
+
+ hits = new MultiTokenHitCollector<V>(tasks);
+
+ }
+ // run the queries.
+ {
+
+ final List<Callable<Object>> tasks = new ArrayList<Callable<Object>>(
+ qdata.distinctTermCount());
+
+ int i = 0;
+ for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) {
+
+ final String termText = e.getKey();
+
+ final ITermMetadata md = e.getValue();
+
+ tasks.add(new ReadIndexTask<V>(termText, i++, qdata.terms.size(),
+ prefixMatch, md.getLocalTermWeight(), this, hits));
+
+ }
+
+ final ExecutionHelper<Object> executionHelper = new ExecutionHelper<Object>(
+ getExecutorService(), timeout, unit);
+
+ try {
+
+ final long start = System.currentTimeMillis();
+
+ executionHelper.submitTasks(tasks);
+
+ if (log.isInfoEnabled()) {
+ final long readTime = System.currentTimeMillis() - start;
+ log.info("read time: " + readTime);
+ }
+
+ } catch (InterruptedException ex) {
+
+ if (log.isInfoEnabled()) {
+ // TODO Should we wrap and toss this interrupt instead?
+ log.info("Interrupted - only partial results will be returned.");
+ }
+
+ /*
+ * Yes, let's toss it. We were getting into a situation
+ * where the ExecutionHelper above received an interrupt
+ * but we still went through the heavy-weight filtering
+ * operations below (matchExact or matchRegex).
+ */
+ throw new RuntimeException(ex);
+
+ } catch (ExecutionException ex) {
+
+ throw new RuntimeException(ex);
+
+ }
+
+ }
+
+ return hits.getHits();
+
}
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchOptimizer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchOptimizer.java 2014-04-02 13:14:09 UTC (rev 8027)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchOptimizer.java 2014-04-02 16:13:03 UTC (rev 8028)
@@ -108,6 +108,7 @@
set.add(BDS.SUBJECT_SEARCH);
set.add(BDS.SEARCH_TIMEOUT);
set.add(BDS.MATCH_REGEX);
+ set.add(BDS.RANGE_COUNT);
searchUris = Collections.unmodifiableSet(set);
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchServiceFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchServiceFactory.java 2014-04-02 13:14:09 UTC (rev 8027)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchServiceFactory.java 2014-04-02 16:13:03 UTC (rev 8028)
@@ -69,6 +69,7 @@
import com.bigdata.rdf.store.BDS;
import com.bigdata.search.Hiterator;
import com.bigdata.search.IHit;
+import com.bigdata.striterator.ChunkedArrayIterator;
import cutthecrap.utils.striterators.ICloseableIterator;
@@ -300,6 +301,10 @@
assertObjectIsLiteral(sp);
+ } else if (uri.equals(BDS.RANGE_COUNT)) {
+
+ assertObjectIsVariable(sp);
+
} else if(uri.equals(BDS.MATCH_REGEX)) {
// a variable for the object is equivalent to regex = null
@@ -367,6 +372,7 @@
private final boolean subjectSearch;
private final Literal searchTimeout;
private final Literal matchRegex;
+ private final IVariable<?> rangeCountVar;
public SearchCall(
final AbstractTripleStore store,
@@ -415,6 +421,7 @@
IVariable<?> relVar = null;
IVariable<?> rankVar = null;
+ IVariable<?> rangeCountVar = null;
Literal minRank = null;
Literal maxRank = null;
Literal minRelevance = null;
@@ -439,6 +446,8 @@
relVar = oVar;
} else if (BDS.RANK.equals(p)) {
rankVar = oVar;
+ } else if (BDS.RANGE_COUNT.equals(p)) {
+ rangeCountVar = oVar;
} else if (BDS.MIN_RANK.equals(p)) {
minRank = (Literal) oVal;
} else if (BDS.MAX_RANK.equals(p)) {
@@ -484,6 +493,7 @@
this.subjectSearch = subjectSearch;
this.searchTimeout = searchTimeout;
this.matchRegex = matchRegex;
+ this.rangeCountVar = rangeCountVar;
}
@@ -527,6 +537,46 @@
}
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ private int getRangeCount() {
+
+// final IValueCentricTextIndexer<IHit> textIndex = (IValueCentricTextIndexer) store
+// .getLexiconRelation().getSearchEngine();
+
+ final ITextIndexer<IHit> textIndex = (ITextIndexer)
+ (this.subjectSearch ?
+ store.getLexiconRelation().getSubjectCentricSearchEngine() :
+ store.getLexiconRelation().getSearchEngine());
+
+ if (textIndex == null)
+ throw new UnsupportedOperationException("No free text index?");
+
+ String s = query.getLabel();
+ final boolean prefixMatch;
+ if (s.indexOf('*') >= 0) {
+ prefixMatch = true;
+ s = s.replaceAll("\\*", "");
+ } else {
+ prefixMatch = false;
+ }
+
+ return textIndex.count(new FullTextQuery(
+ s,//
+ query.getLanguage(),//
+ prefixMatch,//
+ matchRegex == null ? null : matchRegex.stringValue(),
+ matchAllTerms,
+ matchExact,
+ minRelevance == null ? BDS.DEFAULT_MIN_RELEVANCE : minRelevance.doubleValue()/* minCosine */,
+ maxRelevance == null ? BDS.DEFAULT_MAX_RELEVANCE : maxRelevance.doubleValue()/* maxCosine */,
+ minRank == null ? BDS.DEFAULT_MIN_RANK/*1*/ : minRank.intValue()/* minRank */,
+ maxRank == null ? BDS.DEFAULT_MAX_RANK/*Integer.MAX_VALUE*/ : maxRank.intValue()/* maxRank */,
+ searchTimeout == null ? BDS.DEFAULT_TIMEOUT/*0L*/ : searchTimeout.longValue()/* timeout */,
+ TimeUnit.MILLISECONDS
+ ));
+
+ }
+
/**
* {@inheritDoc}
*
@@ -561,7 +611,24 @@
}
- return new HitConverter(getHiterator());
+ if (rangeCountVar != null) {
+
+ final int i = getRangeCount();
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ final ListBindingSet bs = new ListBindingSet(
+ new IVariable[] { rangeCountVar },
+ new IConstant[] { new Constant(new XSDNumericIV(i)) });
+
+ return new ChunkedArrayIterator<IBindingSet>(new IBindingSet[] {
+ bs
+ });
+
+ } else {
+
+ return new HitConverter(getHiterator());
+
+ }
}
@@ -631,11 +698,11 @@
final ListBindingSet bs = new ListBindingSet(vars, vals);
- if (log.isInfoEnabled()) {
- log.info(bs);
- log.info(query.getClass());
- log.info(((BigdataLiteral) query).getIV());
- log.info(((BigdataLiteral) query).getIV().getClass());
+ if (log.isTraceEnabled()) {
+ log.trace(bs);
+ log.trace(query.getClass());
+ log.trace(((BigdataLiteral) query).getIV());
+ log.trace(((BigdataLiteral) query).getIV().getClass());
}
return bs;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java 2014-04-02 13:14:09 UTC (rev 8027)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java 2014-04-02 16:13:03 UTC (rev 8028)
@@ -420,5 +420,14 @@
* The default timeout for a free text search (milliseconds).
*/
final long DEFAULT_TIMEOUT = Long.MAX_VALUE;
+
+ /**
+ * Magic predicate to specify that we want a range count done on the search.
+ * Bind the range count to the variable in the object position. Will
+ * attempt to do a fast range count on the index rather than materializing
+ * the hits into an array. This is only possible if matchExact == false
+ * and matchRegex == null.
+ */
+ final URI RANGE_COUNT = new URIImpl(NAMESPACE + "rangeCount");
}
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <mrp...@us...> - 2014-04-07 20:33:34
|
Revision: 8076
http://sourceforge.net/p/bigdata/code/8076
Author: mrpersonick
Date: 2014-04-07 20:33:31 +0000 (Mon, 07 Apr 2014)
Log Message:
-----------
fixed ticket 831, opened ticket 874
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/831.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/831.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/874.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket831.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket874.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java 2014-04-07 20:22:03 UTC (rev 8075)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java 2014-04-07 20:33:31 UTC (rev 8076)
@@ -27,6 +27,7 @@
package com.bigdata.rdf.sparql.ast.optimizers;
+import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
@@ -55,6 +56,11 @@
import com.bigdata.rdf.sparql.ast.VarNode;
import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext;
+import cutthecrap.utils.striterators.EmptyIterator;
+import cutthecrap.utils.striterators.Expander;
+import cutthecrap.utils.striterators.SingleValueIterator;
+import cutthecrap.utils.striterators.Striterator;
+
/**
* Examines the source {@link IBindingSet}[]. If there is a single binding set
* in the source, then any variable bound in that input is rewritten in the AST
@@ -152,15 +158,25 @@
final GroupNodeBase<IGroupMemberNode> whereClause,
final IBindingSet bset) {
+ doBindingAssignment(whereClause, Collections.EMPTY_MAP, bset);
+
+ }
+
+ private void doBindingAssignment(
+ final GroupNodeBase<IGroupMemberNode> whereClause,
+ final Map<VarNode, ConstantNode> parentReplacements,
+ final IBindingSet bset) {
+
final Map<VarNode, ConstantNode> replacements = new LinkedHashMap<VarNode, ConstantNode>();
+
+ replacements.putAll(parentReplacements);
- final Iterator<BOp> itr = BOpUtility
- .preOrderIterator((BOp) whereClause);
+ final Iterator<BOp> itr = iterateExcludeGroups(whereClause);
while (itr.hasNext()) {
+
+ final BOp node = itr.next();
- final BOp node = (BOp) itr.next();
-
if (node instanceof FilterNode) {
/*
@@ -213,9 +229,110 @@
if (log.isInfoEnabled())
log.info("Replaced " + ntotal + " instances of "
+ replacements.size() + " bound variables with constants");
+
+ // recurse into the children
+ for (IGroupMemberNode node : whereClause) {
+ if (node instanceof GroupNodeBase) {
+
+ doBindingAssignment((GroupNodeBase<IGroupMemberNode>) node, replacements, bset);
+
+ }
+
+ }
+
}
+
+ /**
+ * Visits the children (recursively) using pre-order traversal, but does NOT
+ * visit this node.
+ *
+ * @param stack
+ */
+ @SuppressWarnings("unchecked")
+ private Iterator<BOp> iterateExcludeGroups(final BOp op) {
+
+ return iterateExcludeGroups(0, op);
+
+ }
+
+ /**
+ * Visits the children (recursively) using pre-order traversal, but does NOT
+ * visit this node.
+ *
+ * @param stack
+ */
+ @SuppressWarnings("unchecked")
+ private Iterator<BOp> iterateExcludeGroups(final int depth, final BOp op) {
+ /*
+ * Iterator visits the direct children, expanding them in turn with a
+ * recursive application of the pre-order iterator.
+ */
+
+ // mild optimization when no children are present.
+ if (op == null || op.arity() == 0)
+ return EmptyIterator.DEFAULT;
+
+ if (depth > 0 && op instanceof GroupNodeBase)
+ return EmptyIterator.DEFAULT;
+
+ return new Striterator(op.argIterator()).addFilter(new Expander() {
+
+ private static final long serialVersionUID = 1L;
+
+ /*
+ * Expand each child in turn.
+ */
+ protected Iterator expand(final Object childObj) {
+
+ /*
+ * A child of this node.
+ */
+
+ final BOp child = (BOp) childObj;
+
+ /*
+ * TODO The null child reference which can occur here is the [c]
+ * of the StatementPatternNode. We might want to make [c] an
+ * anonymous variable instead of having a [null].
+ */
+ if (child != null && child.arity() > 0) {
+
+ /*
+ * The child is a Node (has children).
+ *
+ * Visit the children (recursive pre-order traversal).
+ */
+
+ // append this node in pre-order position.
+ final Striterator itr = new Striterator(
+ new SingleValueIterator(child));
+
+ // append children
+ itr.append(iterateExcludeGroups(depth + 1, child));
+
+ return itr;
+
+ } else {
+
+ /*
+ * The child is a leaf.
+ */
+
+ // Visit the leaf itself.
+ return new SingleValueIterator(child);
+
+ }
+
+ }
+
+ });
+
+ }
+
+
+
/**
* Gather the VarNodes for variables which have bindings.
*
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/831.rq
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/831.rq (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/831.rq 2014-04-07 20:33:31 UTC (rev 8076)
@@ -0,0 +1,20 @@
+PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
+PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
+
+select ?s ?p ?o
+
+where {
+
+ {
+ ?s ?p ?o.
+ filter(?s = <http://example.org/data/person1>)
+ }
+ UNION
+ {
+ ?s ?p ?o.
+ filter(?s = <http://example.org/data/person2>)
+
+ }
+
+}
\ No newline at end of file
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/831.ttl
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/831.ttl (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/831.ttl 2014-04-07 20:33:31 UTC (rev 8076)
@@ -0,0 +1,11 @@
+@prefix : <http://example.org/data/> .
+
+:person1
+ a :Person ;
+ :age 21;
+ :name "Person 1".
+
+:person2
+ a :Person ;
+ :age 11;
+ :name "Person 2".
\ No newline at end of file
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq 2014-04-07 20:33:31 UTC (rev 8076)
@@ -0,0 +1,17 @@
+prefix xsd: <http://www.w3.org/2001/XMLSchema#>
+SELECT *
+where {
+?user <http://arvados.org/schema/api_token> <token:ckedd> .
+{
+ ?user <http://arvados.org/schema/user_is_admin> true .
+ ?s ?p ?o .
+ FILTER strStarts(str(?s), "http://arvados.org/schema/modified") .
+}
+union
+{
+ ?user <http://arvados.org/schema/user_is_admin> false .
+ ?user <http://arvados.org/schema/permission/can_read> ?s .
+ ?s ?p ?o .
+ FILTER strStarts(str(?s), "http://arvados.org/schema/modified") .
+}
+}
\ No newline at end of file
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/874.ttl
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/874.ttl (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/874.ttl 2014-04-07 20:33:31 UTC (rev 8076)
@@ -0,0 +1,4 @@
+@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
+<http://qr1hi/user/12345> <http://arvados.org/schema/api_token> <token:ckedd> .
+<http://qr1hi/user/12345> <http://arvados.org/schema/user_is_admin> "true"^^xsd:boolean .
+<http://arvados.org/schema/modified_at> <http://rdf#type> <http://rdfs#Property> .
\ No newline at end of file
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket831.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket831.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket831.java 2014-04-07 20:33:31 UTC (rev 8076)
@@ -0,0 +1,177 @@
+/**
+Copyright (C) SYSTAP, LLC 2011. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+package com.bigdata.rdf.sail;
+
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.Properties;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.log4j.Logger;
+import org.openrdf.model.impl.URIImpl;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.QueryLanguage;
+import org.openrdf.query.TupleQueryResult;
+import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.repository.RepositoryConnection;
+import org.openrdf.repository.sail.SailTupleQuery;
+import org.openrdf.rio.RDFFormat;
+
+import com.bigdata.rdf.axioms.NoAxioms;
+import com.bigdata.rdf.vocab.NoVocabulary;
+
+/**
+ * Unit test template for use in submission of bugs.
+ * <p>
+ * This test case will delegate to an underlying backing store. You can
+ * specify this store via a JVM property as follows:
+ * <code>-DtestClass=com.bigdata.rdf.sail.TestBigdataSailWithQuads</code>
+ * <p>
+ * There are three possible configurations for the testClass:
+ * <ul>
+ * <li>com.bigdata.rdf.sail.TestBigdataSailWithQuads (quads mode)</li>
+ * <li>com.bigdata.rdf.sail.TestBigdataSailWithoutSids (triples mode)</li>
+ * <li>com.bigdata.rdf.sail.TestBigdataSailWithSids (SIDs mode)</li>
+ * </ul>
+ * <p>
+ * The default for triples and SIDs mode is for inference with truth maintenance
+ * to be on. If you would like to turn off inference, make sure to do so in
+ * {@link #getProperties()}.
+ *
+ * @author <a href="mailto:mrp...@us...">Mike Personick</a>
+ * @version $Id$
+ */
+public class TestTicket831 extends ProxyBigdataSailTestCase {
+
+ protected static final Logger log = Logger.getLogger(TestTicket831.class);
+
+ /**
+ * Please set your database properties here, except for your journal file,
+ * please DO NOT SPECIFY A JOURNAL FILE.
+ */
+ @Override
+ public Properties getProperties() {
+
+ Properties props = super.getProperties();
+
+ /*
+ * For example, here is a set of five properties that turns off
+ * inference, truth maintenance, and the free text index.
+ */
+ props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName());
+ props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName());
+ props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false");
+ props.setProperty(BigdataSail.Options.JUSTIFY, "false");
+ props.setProperty(BigdataSail.Options.TEXT_INDEX, "false");
+
+ return props;
+
+ }
+
+ public TestTicket831() {
+ }
+
+ public TestTicket831(String arg0) {
+ super(arg0);
+ }
+
+ public void testBug1() throws Exception {
+
+ /*
+ * The bigdata store, backed by a temporary journal file.
+ */
+ final BigdataSail bigdataSail = getSail();
+
+ /*
+ * Data file containing the data demonstrating your bug.
+ */
+ final String data = "831.ttl";
+ final String baseURI = "";
+ final RDFFormat format = RDFFormat.TURTLE;
+
+ try {
+
+ bigdataSail.initialize();
+
+ final BigdataSailRepository bigdataRepo = new BigdataSailRepository(bigdataSail);
+
+ { // load the data into the bigdata store
+
+ final RepositoryConnection cxn = bigdataRepo.getConnection();
+ try {
+ cxn.setAutoCommit(false);
+ cxn.add(getClass().getResourceAsStream(data), baseURI, format);
+// cxn.add(data);
+ cxn.commit();
+ } finally {
+ cxn.close();
+ }
+
+ }
+
+ {
+// final Collection<BindingSet> answer = new LinkedList<BindingSet>();
+// answer.add(createBindingSet(
+// new BindingImpl("sub", new URIImpl("http://example.org/B"))
+// ));
+
+ final String query = IOUtils.toString(getClass().getResourceAsStream("831.rq"));
+
+ if (log.isInfoEnabled()) {
+ log.info("running query:\n" + query);
+ }
+
+ /*
+ * Run the problem query using the bigdata store and then compare
+ * the answer.
+ */
+ final RepositoryConnection cxn = bigdataRepo.getReadOnlyConnection();
+ try {
+
+ final SailTupleQuery tupleQuery = (SailTupleQuery)
+ cxn.prepareTupleQuery(QueryLanguage.SPARQL, query);
+ tupleQuery.setIncludeInferred(false /* includeInferred */);
+
+ final TupleQueryResult result = tupleQuery.evaluate();
+// compare(result, answer);
+
+ while (result.hasNext()) {
+ log.info(result.next());
+ }
+
+ } finally {
+ cxn.close();
+ }
+
+ }
+
+ } finally {
+
+ bigdataSail.__tearDownUnitTest();
+
+ }
+
+ }
+
+}
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket831.java
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket874.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket874.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket874.java 2014-04-07 20:33:31 UTC (rev 8076)
@@ -0,0 +1,177 @@
+/**
+Copyright (C) SYSTAP, LLC 2011. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+package com.bigdata.rdf.sail;
+
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.Properties;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.log4j.Logger;
+import org.openrdf.model.impl.URIImpl;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.QueryLanguage;
+import org.openrdf.query.TupleQueryResult;
+import org.openrdf.query.impl.BindingImpl;
+import org.openrdf.repository.RepositoryConnection;
+import org.openrdf.repository.sail.SailTupleQuery;
+import org.openrdf.rio.RDFFormat;
+
+import com.bigdata.rdf.axioms.NoAxioms;
+import com.bigdata.rdf.vocab.NoVocabulary;
+
+/**
+ * Unit test template for use in submission of bugs.
+ * <p>
+ * This test case will delegate to an underlying backing store. You can
+ * specify this store via a JVM property as follows:
+ * <code>-DtestClass=com.bigdata.rdf.sail.TestBigdataSailWithQuads</code>
+ * <p>
+ * There are three possible configurations for the testClass:
+ * <ul>
+ * <li>com.bigdata.rdf.sail.TestBigdataSailWithQuads (quads mode)</li>
+ * <li>com.bigdata.rdf.sail.TestBigdataSailWithoutSids (triples mode)</li>
+ * <li>com.bigdata.rdf.sail.TestBigdataSailWithSids (SIDs mode)</li>
+ * </ul>
+ * <p>
+ * The default for triples and SIDs mode is for inference with truth maintenance
+ * to be on. If you would like to turn off inference, make sure to do so in
+ * {@link #getProperties()}.
+ *
+ * @author <a href="mailto:mrp...@us...">Mike Personick</a>
+ * @version $Id$
+ */
+public class TestTicket874 extends ProxyBigdataSailTestCase {
+
+ protected static final Logger log = Logger.getLogger(TestTicket874.class);
+
+ /**
+ * Please set your database properties here, except for your journal file,
+ * please DO NOT SPECIFY A JOURNAL FILE.
+ */
+ @Override
+ public Properties getProperties() {
+
+ Properties props = super.getProperties();
+
+ /*
+ * For example, here is a set of five properties that turns off
+ * inference, truth maintenance, and the free text index.
+ */
+ props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName());
+ props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName());
+ props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false");
+ props.setProperty(BigdataSail.Options.JUSTIFY, "false");
+ props.setProperty(BigdataSail.Options.TEXT_INDEX, "false");
+
+ return props;
+
+ }
+
+ public TestTicket874() {
+ }
+
+ public TestTicket874(String arg0) {
+ super(arg0);
+ }
+
+ public void testBug1() throws Exception {
+
+ /*
+ * The bigdata store, backed by a temporary journal file.
+ */
+ final BigdataSail bigdataSail = getSail();
+
+ /*
+ * Data file containing the data demonstrating your bug.
+ */
+ final String data = "874.ttl";
+ final String baseURI = "";
+ final RDFFormat format = RDFFormat.TURTLE;
+
+ try {
+
+ bigdataSail.initialize();
+
+ final BigdataSailRepository bigdataRepo = new BigdataSailRepository(bigdataSail);
+
+ { // load the data into the bigdata store
+
+ final RepositoryConnection cxn = bigdataRepo.getConnection();
+ try {
+ cxn.setAutoCommit(false);
+ cxn.add(getClass().getResourceAsStream(data), baseURI, format);
+// cxn.add(data);
+ cxn.commit();
+ } finally {
+ cxn.close();
+ }
+
+ }
+
+ {
+// final Collection<BindingSet> answer = new LinkedList<BindingSet>();
+// answer.add(createBindingSet(
+// new BindingImpl("sub", new URIImpl("http://example.org/B"))
+// ));
+
+ final String query = IOUtils.toString(getClass().getResourceAsStream("874.rq"));
+
+ if (log.isInfoEnabled()) {
+ log.info("running query:\n" + query);
+ }
+
+ /*
+ * Run the problem query using the bigdata store and then compare
+ * the answer.
+ */
+ final RepositoryConnection cxn = bigdataRepo.getReadOnlyConnection();
+ try {
+
+ final SailTupleQuery tupleQuery = (SailTupleQuery)
+ cxn.prepareTupleQuery(QueryLanguage.SPARQL, query);
+ tupleQuery.setIncludeInferred(false /* includeInferred */);
+
+ final TupleQueryResult result = tupleQuery.evaluate();
+// compare(result, answer);
+
+ while (result.hasNext()) {
+ log.info(result.next());
+ }
+
+ } finally {
+ cxn.close();
+ }
+
+ }
+
+ } finally {
+
+ bigdataSail.__tearDownUnitTest();
+
+ }
+
+ }
+
+}
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket874.java
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-04-08 13:08:14
|
Revision: 8084
http://sourceforge.net/p/bigdata/code/8084
Author: thompsonbry
Date: 2014-04-08 13:08:10 +0000 (Tue, 08 Apr 2014)
Log Message:
-----------
Bug fix for POST of CANCEL on follower in HA mode.
See #883
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java 2014-04-08 12:22:11 UTC (rev 8083)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java 2014-04-08 13:08:10 UTC (rev 8084)
@@ -122,7 +122,7 @@
* Do CANCEL for each service using the SPARQL end point associated with
* a non-default namespace:
*
- * /sparql/namespace/NAMESPACE
+ * /namespace/NAMESPACE/sparql
*/
{
final String namespace = "kb";
@@ -146,7 +146,7 @@
* instance associated with the given <i>namespace</i>. The
* {@link RemoteRepository} will use a URL for the SPARQL end point that is
* associated with the specified namespace and formed as
- * <code>/sparql/namespace/<i>namespace</i></code> rather than the default
+ * <code>/namespace/<i>namespace</i>/sparql</code> rather than the default
* KB SPARQL end point (<code>/sparql</code>).
*
* @param haGlue
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-04-08 12:22:11 UTC (rev 8083)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-04-08 13:08:10 UTC (rev 8084)
@@ -106,20 +106,20 @@
protected void doPost(final HttpServletRequest req,
final HttpServletResponse resp) throws IOException {
- if (!isWritable(req, resp)) {
- // Service must be writable.
- return;
- }
+ if (req.getRequestURI().endsWith("/namespace")) {
- if (req.getRequestURI().endsWith("/namespace")) {
-
+ // CREATE NAMESPACE.
doCreateNamespace(req, resp);
return;
}
- // Pass through to the SPARQL end point REST API.
+ /*
+ * Pass through to the SPARQL end point REST API.
+ *
+ * Note: This also handles CANCEL QUERY, which is a POST.
+ */
m_restServlet.doPost(req, resp);
}
@@ -220,6 +220,11 @@
private void doCreateNamespace(final HttpServletRequest req,
final HttpServletResponse resp) throws IOException {
+ if (!isWritable(req, resp)) {
+ // Service must be writable.
+ return;
+ }
+
final BigdataRDFContext context = getBigdataRDFContext();
final IIndexManager indexManager = context.getIndexManager();
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-04-08 12:22:11 UTC (rev 8083)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-04-08 13:08:10 UTC (rev 8084)
@@ -579,8 +579,7 @@
*/
try {
EntityUtils.consume(response.getEntity());
- } catch (IOException ex) {
- }
+ } catch (IOException ex) {log.warn(ex); }
}
}
@@ -637,7 +636,7 @@
if (resp != null)
EntityUtils.consume(resp.getEntity());
- } catch (Exception ex) { }
+ } catch (Exception ex) {log.warn(ex); }
}
@@ -690,7 +689,7 @@
if (resp != null)
EntityUtils.consume(resp.getEntity());
- } catch (Exception ex) { }
+ } catch (Exception ex) {log.warn(ex); }
}
@@ -749,7 +748,7 @@
if (response != null)
EntityUtils.consume(response.getEntity());
- } catch (Exception ex) { }
+ } catch (Exception ex) {log.warn(ex); }
}
@@ -829,7 +828,7 @@
if (response != null)
EntityUtils.consume(response.getEntity());
- } catch (Exception ex) { }
+ } catch (Exception ex) {log.warn(ex); }
}
@@ -917,7 +916,7 @@
if (response != null)
EntityUtils.consume(response.getEntity());
- } catch (Exception ex) { }
+ } catch (Exception ex) {log.warn(ex); }
}
@@ -1487,7 +1486,7 @@
// conn.disconnect();
} catch (Throwable t2) {
- // ignored.
+ log.warn(t2); // ignored.
}
throw new RuntimeException(sparqlEndpointURL + " : " + t, t);
}
@@ -1665,7 +1664,7 @@
try {
cancel(queryId);
- } catch (Exception ex) { }
+ } catch (Exception ex) {log.warn(ex); }
}
@@ -1693,13 +1692,13 @@
if (entity != null && result == null) {
try {
EntityUtils.consume(entity);
- } catch (IOException ex) { }
+ } catch (IOException ex) {log.warn(ex); }
}
if (response != null && tqrImpl == null) {
try {
cancel(queryId);
- } catch(Exception ex) { }
+ } catch(Exception ex) {log.warn(ex); }
}
}
@@ -1811,7 +1810,7 @@
try {
cancel(queryId);
- } catch (Exception ex) { }
+ } catch (Exception ex) {log.warn(ex); }
}
@@ -1843,11 +1842,11 @@
if (response != null && result == null) {
try {
EntityUtils.consume(entity);
- } catch (IOException ex) { }
+ } catch (IOException ex) {log.warn(ex); }
try {
cancel(queryId);
- } catch (Exception ex) { }
+ } catch (Exception ex) {log.warn(ex); }
}
}
@@ -1912,11 +1911,11 @@
if (result == null) {
try {
EntityUtils.consume(entity);
- } catch (IOException ex) { }
+ } catch (IOException ex) {log.warn(ex); }
try {
cancel(queryId);
- } catch (Exception ex) { }
+ } catch (Exception ex) {log.warn(ex); }
}
}
@@ -1996,7 +1995,7 @@
// response.disconnect();
try {
EntityUtils.consume(entity);
- } catch (IOException ex) { }
+ } catch (IOException ex) {log.warn(ex); }
}
@@ -2058,7 +2057,7 @@
// response.disconnect();
try {
EntityUtils.consume(entity);
- } catch (IOException ex) { }
+ } catch (IOException ex) {log.warn(ex); }
}
@@ -2120,7 +2119,7 @@
// response.disconnect();
try {
EntityUtils.consume(entity);
- } catch (IOException ex) { }
+ } catch (IOException ex) {log.warn(ex); }
}
@@ -2178,7 +2177,7 @@
// response.disconnect();
try {
EntityUtils.consume(entity);
- } catch (IOException ex) { }
+ } catch (IOException ex) {log.warn(ex); }
}
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-04-15 17:08:46
|
Revision: 8123
http://sourceforge.net/p/bigdata/code/8123
Author: thompsonbry
Date: 2014-04-15 17:08:37 +0000 (Tue, 15 Apr 2014)
Log Message:
-----------
Merge of the HA1/HA5 branch back to the main development branch. This closes out #722 (HA1). #723 remains open with one test that fails in CI.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-D.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-E.properties
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties
Property Changed:
----------------
branches/BIGDATA_RELEASE_1_3_0/
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd/
branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility/
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr/
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco/
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/util/config/
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc/src/resources/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/lubm/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot/
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot/src/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/error/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/relation/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/util/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/samples/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/internal/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/relation/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/LEGAL/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/lib/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/it/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/it/unimi/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/it/unimi/
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/it/unimi/dsi/
branches/BIGDATA_RELEASE_1_3_0/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/
branches/BIGDATA_RELEASE_1_3_0/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/
branches/BIGDATA_RELEASE_1_3_0/osgi/
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/config/
Index: branches/BIGDATA_RELEASE_1_3_0
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0 2014-04-15 17:08:37 UTC (rev 8123)
Property changes on: branches/BIGDATA_RELEASE_1_3_0
___________________________________________________________________
Modified: svn:mergeinfo
## -1,3 +1,4 ##
+/branches/BIGDATA_MGC_HA1_HA5:8025-8122
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785
/branches/BIGDATA_RELEASE_1_2_0:6766-7380
/branches/BTREE_BUFFER_BRANCH:2004-2045
\ No newline at end of property
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty 2014-04-15 17:08:37 UTC (rev 8123)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty
___________________________________________________________________
Modified: svn:mergeinfo
## -1,3 +1,4 ##
+/branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty:8025-8122
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380
/branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522
\ No newline at end of property
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-15 17:08:37 UTC (rev 8123)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate
___________________________________________________________________
Modified: svn:mergeinfo
## -1,3 +1,4 ##
+/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate:8025-8122
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522
\ No newline at end of property
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-15 17:08:37 UTC (rev 8123)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph
___________________________________________________________________
Modified: svn:mergeinfo
## -1,3 +1,4 ##
+/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph:8025-8122
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522
\ No newline at end of property
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util 2014-04-15 17:08:37 UTC (rev 8123)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util
___________________________________________________________________
Modified: svn:mergeinfo
## -1,3 +1,4 ##
+/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util:8025-8122
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522
\ No newline at end of property
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba 2014-04-15 17:08:37 UTC (rev 8123)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba
___________________________________________________________________
Modified: svn:mergeinfo
## -1,3 +1,4 ##
+/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba:8025-8122
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522
\ No newline at end of property
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-15 17:08:37 UTC (rev 8123)
@@ -137,6 +137,13 @@
}
+ /**
+ * Used to zero pad slots in buffered writes.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/721#comment:10"> HA1 </a>
+ */
+ static private final byte[] s_zeros = new byte[256];
+
/**
* Buffer a write.
*
@@ -188,6 +195,19 @@
}
// copy the caller's record into the buffer.
m_data.put(data);
+
+ // if data_len < slot_len then clear remainder of buffer
+ int padding = slot_len - data_len;
+ while (padding > 0) {
+ if (padding > s_zeros.length) {
+ m_data.put(s_zeros);
+ padding -= s_zeros.length;
+ } else {
+ m_data.put(s_zeros, 0, padding);
+ break;
+ }
+ }
+
// update the file offset by the size of the allocation slot
m_endAddr += slot_len;
// update the buffer position by the size of the allocation slot.
@@ -250,8 +270,9 @@
final ByteBuffer m_data = tmp.buffer();
// reset the buffer state.
- m_data.position(0);
- m_data.limit(m_data.capacity());
+ //m_data.position(0);
+ //m_data.limit(m_data.capacity());
+ m_data.clear();
m_startAddr = -1;
m_endAddr = 0;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-15 17:08:37 UTC (rev 8123)
@@ -310,7 +310,7 @@
/**
* When a record is used as a read cache then the readCount is
- * maintained as a metric on its access. �This could be used to
+ * maintained as a metric on its access. This could be used to
* determine eviction/compaction.
* <p>
* Note: volatile to guarantee visibility of updates. Might do better
@@ -509,7 +509,8 @@
* @param isHighlyAvailable
* when <code>true</code> the whole record checksum is maintained
* for use when replicating the write cache along the write
- * pipeline.
+ * pipeline. This needs to be <code>true</code> for HA1 as well
+ * since we need to write the HALog.
* @param bufferHasData
* when <code>true</code> the caller asserts that the buffer has
* data (from a replicated write), in which case the position
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-15 17:08:37 UTC (rev 8123)
@@ -1151,6 +1151,7 @@
done = WriteCache.transferTo(cache/* src */,
curCompactingCache/* dst */, serviceMap, 0/*threshold*/);
if (done) {
+ // Everything was compacted. Send just the address metadata (empty cache block).
sendAddressMetadata(cache);
if (log.isDebugEnabled())
@@ -1231,7 +1232,7 @@
* been allocated on the leader in the same order in which the leader
* made those allocations. This information is used to infer the order
* in which the allocators for the different allocation slot sizes are
- * created. This method will synchronous send those address notices and
+ * created. This method will synchronously send those address notices and
 * also makes sure that the followers see the recycled addresses
* records so they can keep both their allocators and the actual
* allocations synchronized with the leader.
@@ -1244,13 +1245,15 @@
* @throws InterruptedException
* @throws ExecutionException
* @throws IOException
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a>
*/
private void sendAddressMetadata(final WriteCache cache)
throws IllegalStateException, InterruptedException,
ExecutionException, IOException {
- if (quorum == null || !quorum.isHighlyAvailable()
- || !quorum.getClient().isLeader(quorumToken)) {
+ if (quorum == null) { //|| !quorum.isHighlyAvailable()
+// || !quorum.getClient().isLeader(quorumToken)) {
return;
}
@@ -1344,20 +1347,15 @@
private void writeCacheBlock(final WriteCache cache)
throws InterruptedException, ExecutionException, IOException {
- /*
- * IFF HA
+ /**
+ * IFF HA and this is the quorum leader.
*
- * TODO isHA should be true even if the quorum is not highly
- * available since there still could be other services in the write
- * pipeline (e.g., replication to an offline HAJournalServer prior
- * to changing over into an HA3 quorum or off-site replication). The
- * unit tests need to be updated to specify [isHighlyAvailable] for
- * ALL quorum based test runs.
+ * Note: This is true for HA1 as well. The code path enabled by this
+ * is responsible for writing the HALog files.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a>
*/
- final boolean isHA = quorum != null && quorum.isHighlyAvailable();
-
- // IFF HA and this is the quorum leader.
- final boolean isHALeader = isHA
+ final boolean isHALeader = quorum != null
&& quorum.getClient().isLeader(quorumToken);
/*
@@ -1438,15 +1436,25 @@
* then clean up the documentation here (see the commented
* out version of this line below).
*/
- quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate());
-
- // ASYNC MSG RMI + NIO XFER.
- remoteWriteFuture = quorumMember.replicate(null/* req */, pkg.getMessage(),
- pkg.getData().duplicate());
-
- counters.get().nsend++;
+ quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate());
/*
+ * TODO Do we want to always support the replication code path
+ * when a quorum exists (that is, also for HA1) in case there
+ * are pipeline listeners that are not HAJournalServer
+ * instances? E.g., for offsite replication?
+ */
+ if (quorum.replicationFactor() > 1) {
+
+ // ASYNC MSG RMI + NIO XFER.
+ remoteWriteFuture = quorumMember.replicate(null/* req */,
+ pkg.getMessage(), pkg.getData().duplicate());
+
+ counters.get().nsend++;
+
+ }
+
+ /*
* The quorum leader logs the write cache block here. For the
* followers, the write cache blocks are currently logged by
* HAJournalServer.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-15 17:08:37 UTC (rev 8123)
@@ -2473,18 +2473,18 @@
}
- /**
- * Return <code>true</code> if the journal is configured for high
- * availability.
- *
- * @see QuorumManager#isHighlyAvailable()
- */
- public boolean isHighlyAvailable() {
+// /**
+// * Return <code>true</code> if the journal is configured for high
+// * availability.
+// *
+// * @see Quorum#isHighlyAvailable()
+// */
+// public boolean isHighlyAvailable() {
+//
+// return quorum == null ? false : quorum.isHighlyAvailable();
+//
+// }
- return quorum == null ? false : quorum.isHighlyAvailable();
-
- }
-
/**
* {@inheritDoc}
* <p>
@@ -3428,8 +3428,10 @@
if (quorum == null)
return;
- if (!quorum.isHighlyAvailable())
+ if (!quorum.isHighlyAvailable()) {
+ // Gather and 2-phase commit are not used in HA1.
return;
+ }
/**
* CRITICAL SECTION. We need obtain a distributed consensus for the
@@ -3542,6 +3544,25 @@
// reload the commit record from the new root block.
store._commitRecord = store._getCommitRecord();
+ if (quorum != null) {
+ /**
+ * Write the root block on the HALog file, closing out that
+ * file.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a>
+ */
+ final QuorumService<HAGlue> localService = quorum.getClient();
+ if (localService != null) {
+ // Quorum service not asynchronously closed.
+ try {
+ // Write the closing root block on the HALog file.
+ localService.logRootBlock(newRootBlock);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
if (txLog.isInfoEnabled())
txLog.info("COMMIT: commitTime=" + commitTime);
@@ -3846,9 +3867,9 @@
// Prepare the new root block.
cs.newRootBlock();
- if (quorum == null) {
+ if (quorum == null || quorum.replicationFactor() == 1) {
- // Non-HA mode.
+ // Non-HA mode (including HA1).
cs.commitSimple();
} else {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2014-04-15 17:08:37 UTC (rev 8123)
@@ -146,6 +146,7 @@
}
+ @Override
public ByteBuffer read(final long addr) {
try {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2014-04-15 17:08:37 UTC (rev 8123)
@@ -272,7 +272,7 @@
* which use this flag to conditionally track the checksum of the entire
* write cache buffer).
*/
- private final boolean isHighlyAvailable;
+ private final boolean isQuorumUsed;
/**
* The {@link UUID} which identifies the journal (this is the same for each
@@ -970,11 +970,11 @@
com.bigdata.journal.Options.HALOG_COMPRESSOR,
com.bigdata.journal.Options.DEFAULT_HALOG_COMPRESSOR);
- isHighlyAvailable = quorum != null && quorum.isHighlyAvailable();
+ isQuorumUsed = quorum != null; // && quorum.isHighlyAvailable();
final boolean useWriteCacheService = fileMetadata.writeCacheEnabled
&& !fileMetadata.readOnly && fileMetadata.closeTime == 0L
- || isHighlyAvailable;
+ || isQuorumUsed;
if (useWriteCacheService) {
/*
@@ -1049,7 +1049,7 @@
final long fileExtent)
throws InterruptedException {
- super(baseOffset, buf, useChecksum, isHighlyAvailable,
+ super(baseOffset, buf, useChecksum, isQuorumUsed,
bufferHasData, opener, fileExtent);
}
@@ -1379,6 +1379,7 @@
* to get the data from another node based on past experience for that
* record.
*/
+ @Override
public ByteBuffer read(final long addr) {
try {
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166 2014-04-15 17:08:37 UTC (rev 8123)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166
___________________________________________________________________
Modified: svn:mergeinfo
## -1,3 +1,4 ##
+/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166:8025-8122
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7380
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522
\ No newline at end of property
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2014-04-15 17:08:37 UTC (rev 8123)
@@ -839,6 +839,7 @@
m_statsBucket.allocate(size);
}
+
return value;
} else {
StringBuilder sb = new StringBuilder();
@@ -1300,4 +1301,33 @@
return count;
}
+ /**
+ * Determines if the provided physical address is within an allocated slot
+ * @param addr
+ * @return
+ */
+ public boolean verifyAllocatedAddress(long addr) {
+ if (log.isTraceEnabled())
+ log.trace("Checking Allocator " + m_index + ", size: " + m_size);
+
+ final Iterator<AllocBlock> blocks = m_allocBlocks.iterator();
+ final long range = m_size * m_bitSize * 32;
+ while (blocks.hasNext()) {
+ final int startAddr = blocks.next().m_addr;
+ if (startAddr != 0) {
+ final long start = RWStore.convertAddr(startAddr);
+ final long end = start + range;
+
+ if (log.isTraceEnabled())
+ log.trace("Checking " + addr + " between " + start + " - " + end);
+
+ if (addr >= start && addr < end)
+ return true;
+ } else {
+ break;
+ }
+ }
+ return false;
+ }
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-15 14:16:12 UTC (rev 8122)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-15 17:08:37 UTC (rev 8123)
@@ -688,7 +688,7 @@
throws InterruptedException {
super(buf, useChecksum, m_quorum != null
- && m_quorum.isHighlyAvailable(), bufferHasData, opener,
+ /*&& m_quorum.isHighlyAvailable()*/, bufferHasData, opener,
fileExtent,
m_bufferedWrite);
@@ -1080,16 +1080,17 @@
private RWWriteCacheService newWriteCacheService() {
try {
- final boolean highlyAvailable = m_quorum != null
- && m_quorum.isHighlyAvailable();
+// final boolean highlyAvailable = m_quorum != null
+// && m_quorum.isHighlyAvailable();
- final boolean prefixWrites = highlyAvailable;
+ final boolean prefixWrites = m_quorum != null; // highlyAvailable
return new RWWriteCacheService(m_writeCacheBufferCount,
m_minCleanListSize, m_readCacheBufferCount, prefixWrites, m_compactionThreshold, m_hotCacheSize, m_hotCacheThreshold,
convertAddr(m_fileSize), m_reopener, m_quorum, this) {
-
+
+ @Override
@SuppressWarnings("unchecked")
...
[truncated message content] |
|
From: <tho...@us...> - 2014-04-22 16:22:20
|
Revision: 8137
http://sourceforge.net/p/bigdata/code/8137
Author: thompsonbry
Date: 2014-04-22 16:22:14 +0000 (Tue, 22 Apr 2014)
Log Message:
-----------
Added test for interrupted() for every 20 solutions processed by the ConditionalRoutingOp.
Added test for interrupted() for each RDF Value tested by the RegexBOp.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java 2014-04-21 23:18:59 UTC (rev 8136)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java 2014-04-22 16:22:14 UTC (rev 8137)
@@ -1,243 +1,251 @@
-/**
-
-Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-/*
- * Created on Aug 25, 2010
- */
-
-package com.bigdata.bop.bset;
-
-import java.util.Arrays;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.FutureTask;
-
-import com.bigdata.bop.BOp;
-import com.bigdata.bop.BOpContext;
-import com.bigdata.bop.IBindingSet;
-import com.bigdata.bop.IConstraint;
-import com.bigdata.bop.NV;
-import com.bigdata.bop.PipelineOp;
-import com.bigdata.bop.engine.BOpStats;
-import com.bigdata.relation.accesspath.IBlockingBuffer;
-
-import cutthecrap.utils.striterators.ICloseableIterator;
-
-/**
- * An operator for conditional routing of binding sets in a pipeline. The
- * operator will copy binding sets either to the default sink (if a condition is
- * satisfied) and otherwise to the alternate sink (iff one is specified). If a
- * solution fails the constraint and the alternate sink is not specified, then
- * the solution is dropped.
- * <p>
- * Conditional routing can be useful where a different data flow is required
- * based on the type of an object (for example a term identifier versus an
- * inline term in the RDF database) or where there is a need to jump around a
- * join group based on some condition.
- * <p>
- * Conditional routing will cause reordering of solutions when the alternate
- * sink is specified as some solutions will flow to the primary sink while
- * others flow to the alternate sink.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id: ConditionalRoutingOp.java 7773 2014-01-11 12:49:05Z thompsonbry
- * $
- */
-public class ConditionalRoutingOp extends PipelineOp {
-
- /**
- *
- */
- private static final long serialVersionUID = 1L;
-
- public interface Annotations extends PipelineOp.Annotations {
-
- /**
- * An {@link IConstraint} which specifies the condition. When the
- * condition is satisfied the binding set is routed to the default sink.
- * When the condition is not satisfied, the binding set is routed to the
- * alternative sink.
- */
- String CONDITION = ConditionalRoutingOp.class.getName() + ".condition";
-
- }
-
- /**
- * Deep copy constructor.
- *
- * @param op
- */
- public ConditionalRoutingOp(final ConditionalRoutingOp op) {
-
- super(op);
-
- }
-
- /**
- * Shallow copy constructor.
- *
- * @param args
- * @param annotations
- */
- public ConditionalRoutingOp(final BOp[] args,
- final Map<String, Object> annotations) {
-
- super(args, annotations);
-
- }
-
- public ConditionalRoutingOp(final BOp[] args, final NV... anns) {
-
- this(args, NV.asMap(anns));
-
- }
-
- /**
- * @see Annotations#CONDITION
- */
- public IConstraint getCondition() {
-
- return (IConstraint) getProperty(Annotations.CONDITION);
-
- }
-
- @Override
- public FutureTask<Void> eval(final BOpContext<IBindingSet> context) {
-
- return new FutureTask<Void>(new ConditionalRouteTask(this, context));
-
- }
-
- /**
- * Copy the source to the sink or the alternative sink depending on the
- * condition.
- */
- static private class ConditionalRouteTask implements Callable<Void> {
-
- private final BOpStats stats;
-
- private final IConstraint condition;
-
- private final ICloseableIterator<IBindingSet[]> source;
-
- private final IBlockingBuffer<IBindingSet[]> sink;
-
- private final IBlockingBuffer<IBindingSet[]> sink2;
-
- ConditionalRouteTask(final ConditionalRoutingOp op,
- final BOpContext<IBindingSet> context) {
-
- this.stats = context.getStats();
-
- this.condition = op.getCondition();
-
- if (condition == null)
- throw new IllegalArgumentException();
-
- this.source = context.getSource();
-
- this.sink = context.getSink();
-
- this.sink2 = context.getSink2(); // MAY be null.
-
-// if (sink2 == null)
-// throw new IllegalArgumentException();
-
- if (sink == sink2)
- throw new IllegalArgumentException();
-
- }
-
- @Override
- public Void call() throws Exception {
- try {
- while (source.hasNext()) {
-
- final IBindingSet[] chunk = source.next();
-
- stats.chunksIn.increment();
- stats.unitsIn.add(chunk.length);
-
- final IBindingSet[] def = new IBindingSet[chunk.length];
- final IBindingSet[] alt = sink2 == null ? null
- : new IBindingSet[chunk.length];
-
- int ndef = 0, nalt = 0;
-
- for (int i = 0; i < chunk.length; i++) {
-
- final IBindingSet bset = chunk[i].clone();
-
- if (condition.accept(bset)) {
-
- // solution passes condition. default sink.
- def[ndef++] = bset;
-
- } else if (sink2 != null) {
-
- // solution fails condition. alternative sink.
- alt[nalt++] = bset;
-
- }
-
- }
-
- if (ndef > 0) {
- if (ndef == def.length)
- sink.add(def);
- else
- sink.add(Arrays.copyOf(def, ndef));
-// stats.chunksOut.increment();
-// stats.unitsOut.add(ndef);
- }
-
- if (nalt > 0 && sink2 != null) {
- if (nalt == alt.length)
- sink2.add(alt);
- else
- sink2.add(Arrays.copyOf(alt, nalt));
-// stats.chunksOut.increment();
-// stats.unitsOut.add(nalt);
- }
-
- }
-
- sink.flush();
- if (sink2 != null)
- sink2.flush();
-
- return null;
-
- } finally {
- source.close();
- sink.close();
- if (sink2 != null)
- sink2.close();
-
- }
-
- } // call()
-
- } // ConditionalRoutingTask.
-
-}
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on Aug 25, 2010
+ */
+
+package com.bigdata.bop.bset;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.FutureTask;
+
+import com.bigdata.bop.BOp;
+import com.bigdata.bop.BOpContext;
+import com.bigdata.bop.IBindingSet;
+import com.bigdata.bop.IConstraint;
+import com.bigdata.bop.NV;
+import com.bigdata.bop.PipelineOp;
+import com.bigdata.bop.engine.BOpStats;
+import com.bigdata.relation.accesspath.IBlockingBuffer;
+
+import cutthecrap.utils.striterators.ICloseableIterator;
+
+/**
+ * An operator for conditional routing of binding sets in a pipeline. The
+ * operator will copy binding sets either to the default sink (if a condition is
+ * satisfied) and otherwise to the alternate sink (iff one is specified). If a
+ * solution fails the constraint and the alternate sink is not specified, then
+ * the solution is dropped.
+ * <p>
+ * Conditional routing can be useful where a different data flow is required
+ * based on the type of an object (for example a term identifier versus an
+ * inline term in the RDF database) or where there is a need to jump around a
+ * join group based on some condition.
+ * <p>
+ * Conditional routing will cause reordering of solutions when the alternate
+ * sink is specified as some solutions will flow to the primary sink while
+ * others flow to the alternate sink.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ * @version $Id: ConditionalRoutingOp.java 7773 2014-01-11 12:49:05Z thompsonbry
+ * $
+ */
+public class ConditionalRoutingOp extends PipelineOp {
+
+ /**
+ *
+ */
+ private static final long serialVersionUID = 1L;
+
+ public interface Annotations extends PipelineOp.Annotations {
+
+ /**
+ * An {@link IConstraint} which specifies the condition. When the
+ * condition is satisfied the binding set is routed to the default sink.
+ * When the condition is not satisfied, the binding set is routed to the
+ * alternative sink.
+ */
+ String CONDITION = ConditionalRoutingOp.class.getName() + ".condition";
+
+ }
+
+ /**
+ * Deep copy constructor.
+ *
+ * @param op
+ */
+ public ConditionalRoutingOp(final ConditionalRoutingOp op) {
+
+ super(op);
+
+ }
+
+ /**
+ * Shallow copy constructor.
+ *
+ * @param args
+ * @param annotations
+ */
+ public ConditionalRoutingOp(final BOp[] args,
+ final Map<String, Object> annotations) {
+
+ super(args, annotations);
+
+ }
+
+ public ConditionalRoutingOp(final BOp[] args, final NV... anns) {
+
+ this(args, NV.asMap(anns));
+
+ }
+
+ /**
+ * @see Annotations#CONDITION
+ */
+ public IConstraint getCondition() {
+
+ return (IConstraint) getProperty(Annotations.CONDITION);
+
+ }
+
+ @Override
+ public FutureTask<Void> eval(final BOpContext<IBindingSet> context) {
+
+ return new FutureTask<Void>(new ConditionalRouteTask(this, context));
+
+ }
+
+ /**
+ * Copy the source to the sink or the alternative sink depending on the
+ * condition.
+ */
+ static private class ConditionalRouteTask implements Callable<Void> {
+
+ private final BOpStats stats;
+
+ private final IConstraint condition;
+
+ private final ICloseableIterator<IBindingSet[]> source;
+
+ private final IBlockingBuffer<IBindingSet[]> sink;
+
+ private final IBlockingBuffer<IBindingSet[]> sink2;
+
+ ConditionalRouteTask(final ConditionalRoutingOp op,
+ final BOpContext<IBindingSet> context) {
+
+ this.stats = context.getStats();
+
+ this.condition = op.getCondition();
+
+ if (condition == null)
+ throw new IllegalArgumentException();
+
+ this.source = context.getSource();
+
+ this.sink = context.getSink();
+
+ this.sink2 = context.getSink2(); // MAY be null.
+
+// if (sink2 == null)
+// throw new IllegalArgumentException();
+
+ if (sink == sink2)
+ throw new IllegalArgumentException();
+
+ }
+
+ @Override
+ public Void call() throws Exception {
+ try {
+ while (source.hasNext()) {
+
+ final IBindingSet[] chunk = source.next();
+
+ stats.chunksIn.increment();
+ stats.unitsIn.add(chunk.length);
+
+ final IBindingSet[] def = new IBindingSet[chunk.length];
+ final IBindingSet[] alt = sink2 == null ? null
+ : new IBindingSet[chunk.length];
+
+ int ndef = 0, nalt = 0;
+
+ for (int i = 0; i < chunk.length; i++) {
+
+ if (i % 20 == 0 && Thread.interrupted()) {
+
+ // Eagerly notice if the operator is interrupted.
+ throw new RuntimeException(
+ new InterruptedException());
+
+ }
+
+ final IBindingSet bset = chunk[i].clone();
+
+ if (condition.accept(bset)) {
+
+ // solution passes condition. default sink.
+ def[ndef++] = bset;
+
+ } else if (sink2 != null) {
+
+ // solution fails condition. alternative sink.
+ alt[nalt++] = bset;
+
+ }
+
+ }
+
+ if (ndef > 0) {
+ if (ndef == def.length)
+ sink.add(def);
+ else
+ sink.add(Arrays.copyOf(def, ndef));
+// stats.chunksOut.increment();
+// stats.unitsOut.add(ndef);
+ }
+
+ if (nalt > 0 && sink2 != null) {
+ if (nalt == alt.length)
+ sink2.add(alt);
+ else
+ sink2.add(Arrays.copyOf(alt, nalt));
+// stats.chunksOut.increment();
+// stats.unitsOut.add(nalt);
+ }
+
+ }
+
+ sink.flush();
+ if (sink2 != null)
+ sink2.flush();
+
+ return null;
+
+ } finally {
+ source.close();
+ sink.close();
+ if (sink2 != null)
+ sink2.close();
+
+ }
+
+ } // call()
+
+ } // ConditionalRoutingTask.
+
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java 2014-04-21 23:18:59 UTC (rev 8136)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java 2014-04-22 16:22:14 UTC (rev 8137)
@@ -44,17 +44,17 @@
* SPARQL REGEX operator.
*/
public class RegexBOp extends XSDBooleanIVValueExpression
- implements INeedsMaterialization {
+ implements INeedsMaterialization {
/**
- *
- */
- private static final long serialVersionUID = 1357420268214930143L;
-
- private static final transient Logger log = Logger.getLogger(RegexBOp.class);
+ *
+ */
+ private static final long serialVersionUID = 1357420268214930143L;
+
+ private static final transient Logger log = Logger.getLogger(RegexBOp.class);
public interface Annotations extends XSDBooleanIVValueExpression.Annotations {
-
+
/**
* The cached regex pattern.
*/
@@ -64,65 +64,65 @@
}
private static Map<String,Object> anns(
- final IValueExpression<? extends IV> pattern,
- final IValueExpression<? extends IV> flags) {
-
- try {
-
- if (pattern instanceof IConstant &&
- (flags == null || flags instanceof IConstant)) {
-
- final IV parg = ((IConstant<IV>) pattern).get();
-
- final IV farg = flags != null ?
- ((IConstant<IV>) flags).get() : null;
-
- if (parg.hasValue() && (farg == null || farg.hasValue())) {
-
- final Value pargVal = parg.getValue();
-
- final Value fargVal = farg != null ? farg.getValue() : null;
-
- return NV.asMap(
- new NV(Annotations.PATTERN,
- getPattern(pargVal, fargVal)));
-
- }
-
- }
-
- } catch (Exception ex) {
-
- if (log.isInfoEnabled()) {
- log.info("could not create pattern for: " + pattern + ", " + flags);
- }
-
- }
-
- return BOp.NOANNS;
-
+ final IValueExpression<? extends IV> pattern,
+ final IValueExpression<? extends IV> flags) {
+
+ try {
+
+ if (pattern instanceof IConstant &&
+ (flags == null || flags instanceof IConstant)) {
+
+ final IV parg = ((IConstant<IV>) pattern).get();
+
+ final IV farg = flags != null ?
+ ((IConstant<IV>) flags).get() : null;
+
+ if (parg.hasValue() && (farg == null || farg.hasValue())) {
+
+ final Value pargVal = parg.getValue();
+
+ final Value fargVal = farg != null ? farg.getValue() : null;
+
+ return NV.asMap(
+ new NV(Annotations.PATTERN,
+ getPattern(pargVal, fargVal)));
+
+ }
+
+ }
+
+ } catch (Exception ex) {
+
+ if (log.isInfoEnabled()) {
+ log.info("could not create pattern for: " + pattern + ", " + flags);
+ }
+
+ }
+
+ return BOp.NOANNS;
+
}
- /**
- * Construct a regex bop without flags.
- */
+ /**
+ * Construct a regex bop without flags.
+ */
@SuppressWarnings("rawtypes")
- public RegexBOp(
- final IValueExpression<? extends IV> var,
- final IValueExpression<? extends IV> pattern) {
+ public RegexBOp(
+ final IValueExpression<? extends IV> var,
+ final IValueExpression<? extends IV> pattern) {
this(new BOp[] { var, pattern }, anns(pattern, null));
}
- /**
- * Construct a regex bop with flags.
- */
- @SuppressWarnings("rawtypes")
+ /**
+ * Construct a regex bop with flags.
+ */
+ @SuppressWarnings("rawtypes")
public RegexBOp(
- final IValueExpression<? extends IV> var,
- final IValueExpression<? extends IV> pattern,
- final IValueExpression<? extends IV> flags) {
+ final IValueExpression<? extends IV> var,
+ final IValueExpression<? extends IV> pattern,
+ final IValueExpression<? extends IV> flags) {
this(new BOp[] { var, pattern, flags }, anns(pattern, flags));
@@ -133,8 +133,8 @@
*/
public RegexBOp(final BOp[] args, final Map<String, Object> anns) {
- super(args, anns);
-
+ super(args, anns);
+
if (args.length < 2 || args[0] == null || args[1] == null)
throw new IllegalArgumentException();
@@ -146,33 +146,34 @@
public RegexBOp(final RegexBOp op) {
super(op);
}
-
+
+ @Override
public Requirement getRequirement() {
-
- return INeedsMaterialization.Requirement.SOMETIMES;
-
+
+ return INeedsMaterialization.Requirement.SOMETIMES;
+
}
-
+
+ @Override
public boolean accept(final IBindingSet bs) {
-
- @SuppressWarnings("rawtypes")
+
final Value var = asValue(getAndCheckBound(0, bs));
-
+
@SuppressWarnings("rawtypes")
final IV pattern = getAndCheckBound(1, bs);
@SuppressWarnings("rawtypes")
final IV flags = arity() > 2 ? get(2).get(bs) : null;
-
+
if (log.isDebugEnabled()) {
- log.debug("regex var: " + var);
- log.debug("regex pattern: " + pattern);
- log.debug("regex flags: " + flags);
+ log.debug("regex var: " + var);
+ log.debug("regex pattern: " + pattern);
+ log.debug("regex flags: " + flags);
}
-
- return accept(var, pattern.getValue(),
- flags != null ? flags.getValue() : null);
+ return accept(var, pattern.getValue(), flags != null ? flags.getValue()
+ : null);
+
}
/**
@@ -185,67 +186,87 @@
* REGEXBOp should cache the Pattern when it is a constant </a>
*/
private boolean accept(final Value arg, final Value parg, final Value farg) {
-
+
if (log.isDebugEnabled()) {
- log.debug("regex var: " + arg);
- log.debug("regex pattern: " + parg);
- log.debug("regex flags: " + farg);
+ log.debug("regex var: " + arg);
+ log.debug("regex pattern: " + parg);
+ log.debug("regex flags: " + farg);
}
-
+
if (QueryEvaluationUtil.isSimpleLiteral(arg)) {
-
+
final String text = ((Literal) arg).getLabel();
-
+
try {
-
- // first check for cached pattern
- Pattern pattern = (Pattern) getProperty(Annotations.PATTERN);
- if (pattern == null) {
- pattern = getPattern(parg, farg);
- }
+
+ // first check for cached pattern
+ Pattern pattern = (Pattern) getProperty(Annotations.PATTERN);
+
+ if (pattern == null) {
+
+ // resolve the pattern. NB: NOT cached.
+ pattern = getPattern(parg, farg);
+
+ }
+
+ if (Thread.interrupted()) {
+
+ /*
+ * Eagerly notice if the operator is interrupted.
+ *
+ * Note: Regex can be a high latency operation for a large
+ * RDF Literal. Therefore we want to check for an interrupt
+ * before each regex test. The Pattern code itself will not
+ * notice an interrupt....
+ */
+ throw new RuntimeException(new InterruptedException());
+
+ }
+
final boolean result = pattern.matcher(text).find();
+
return result;
-
+
} catch (IllegalArgumentException ex) {
-
- throw new SparqlTypeErrorException();
-
+
+ throw new SparqlTypeErrorException();
+
}
-
+
} else {
-
- throw new SparqlTypeErrorException();
-
+
+ throw new SparqlTypeErrorException();
+
}
-
+
}
- private static Pattern getPattern(final Value parg, final Value farg)
- throws IllegalArgumentException {
-
+ private static Pattern getPattern(final Value parg, final Value farg)
+ throws IllegalArgumentException {
+
if (log.isDebugEnabled()) {
- log.debug("regex pattern: " + parg);
- log.debug("regex flags: " + farg);
+ log.debug("regex pattern: " + parg);
+ log.debug("regex flags: " + farg);
}
if (QueryEvaluationUtil.isSimpleLiteral(parg)
&& (farg == null || QueryEvaluationUtil.isSimpleLiteral(farg))) {
final String ptn = ((Literal) parg).getLabel();
- String flags = "";
- if (farg != null) {
- flags = ((Literal)farg).getLabel();
- }
- int f = 0;
- for (char c : flags.toCharArray()) {
- switch (c) {
- case 's':
- f |= Pattern.DOTALL;
- break;
- case 'm':
- f |= Pattern.MULTILINE;
- break;
- case 'i': {
+ String flags = "";
+ if (farg != null) {
+ flags = ((Literal)farg).getLabel();
+ }
+ int f = 0;
+ for (char c : flags.toCharArray()) {
+ switch (c) {
+ case 's':
+ f |= Pattern.DOTALL;
+ break;
+ case 'm':
+ f |= Pattern.MULTILINE;
+ break;
+ case 'i': {
/*
* The SPARQL REGEX operator is based on the XQuery REGEX
* operator. That operator should be Unicode clean by
@@ -257,29 +278,29 @@
* > SPARQL REGEX operator does not perform case-folding
* correctly for Unicode data </a>
*/
- f |= Pattern.CASE_INSENSITIVE;
+ f |= Pattern.CASE_INSENSITIVE;
f |= Pattern.UNICODE_CASE;
- break;
- }
- case 'x':
- f |= Pattern.COMMENTS;
- break;
- case 'd':
- f |= Pattern.UNIX_LINES;
- break;
- case 'u': // Implicit with 'i' flag.
-// f |= Pattern.UNICODE_CASE;
- break;
- default:
- throw new IllegalArgumentException();
- }
- }
+ break;
+ }
+ case 'x':
+ f |= Pattern.COMMENTS;
+ break;
+ case 'd':
+ f |= Pattern.UNIX_LINES;
+ break;
+ case 'u': // Implicit with 'i' flag.
+// f |= Pattern.UNICODE_CASE;
+ break;
+ default:
+ throw new IllegalArgumentException();
+ }
+ }
final Pattern pattern = Pattern.compile(ptn, f);
return pattern;
}
-
- throw new IllegalArgumentException();
-
+
+ throw new IllegalArgumentException();
+
}
}
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-01 16:21:34
|
Revision: 8160
http://sourceforge.net/p/bigdata/code/8160
Author: thompsonbry
Date: 2014-05-01 16:21:25 +0000 (Thu, 01 May 2014)
Log Message:
-----------
Committing merge of the RDR branch back into the 1.3.0 branch for CI. I have run the test suites for AST evaluation, the NSS, most of the HA CI test suite, and the quads mode test suite of the sail. That all looks pretty good. I have also checked the bigdata GAS test suite, but we still lack a SPARQL level test suite for the GASService.
This commit brings in the new workbench, the RDR support, the HA load balancer, significant advances in the RDF GAS engine, etc.
@see #526 (RDR).
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/.classpath
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/IPredicate.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/BytesUtil.c
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/PageStats.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Journal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/ThreadLocalBufferFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j-dev.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/btree/TestGetBitsFromByteArray.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/java/com/bigdata/ganglia/HostReport.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/java/com/bigdata/ganglia/HostReportComparator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/EdgesEnum.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASOptions.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASProgram.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IReducer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/CC.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/BaseGASProgram.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASState.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/ram/RAMGraphLoader.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/sail/SAILGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/util/GASRunnerBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/GraphLoader.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/util/SailGraphLoader.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/analytics/TestBFS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/analytics/TestSSSP.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallGraph.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/impl/ram/TestGather.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/impl/sail/TestGather.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/samples/com/bigdata/gom/samples/Example1.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/samples/com/bigdata/gom/samples/Example2.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/test/com/bigdata/gom/Example1.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/test/com/bigdata/gom/LocalGOMTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/test/com/bigdata/gom/RemoteGOMTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gom/src/test/com/bigdata/gom/TestRemoteGOM.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/chem2bio2rdf/src/test/com/bigdata/perf/chem2bio2rdf/TestQuery.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/ChunkedMaterializationOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASEngine.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASRunner.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASState.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGraphFixture.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/AbstractIV.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/AbstractNonInlineIV.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/bnode/SidIV.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/model/BNodeContextFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNode.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataBNodeImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactoryImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/ntriples/BigdataNTriplesParser.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/BigdataRDFXMLParser.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleParser.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysis.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCall.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceRegistry.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/history/HistoryIndexTupleSerializer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOPredicate.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOTupleSerializer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataBindingSetResolverator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIteratorImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/resources/service-providers/META-INF/services/org.openrdf.rio.RDFParserFactory
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/resources/service-providers/META-INF/services/org.openrdf.rio.RDFWriterFactory
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/AbstractBigdataGraphTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestBFS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestGather.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestSSSP.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestEncodeDecodeKeys.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestEncodeDecodeMixedIVs.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestNTriplesWithSids.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestRDFXMLInterchangeWithStatementIdentifiers.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestStatementBuffer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/rio/rdfxml/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/TestStaticAnalysis.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/TestReificationDoneRightEval.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-01.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTNamedSubqueryOptimizer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTUnionFiltersOptimizer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPO.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOTupleSerializer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPOValueCoders.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestStatementIdentifiers.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestTripleStore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/PrefixDeclProcessor.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestReificationDoneRightParser.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractNanoSparqlServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractTestNanoSparqlClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestFederatedQuery.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml
branches/BIGDATA_RELEASE_1_3_0/build.properties
branches/BIGDATA_RELEASE_1_3_0/build.xml
branches/BIGDATA_RELEASE_1_3_0/pom.xml
branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/startHAServices.config
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/LEGAL/
branches/BIGDATA_RELEASE_1_3_0/LEGAL/apache-license-2_0.txt
branches/BIGDATA_RELEASE_1_3_0/LEGAL/sesame2.x-license.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/README.TXT
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-client-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-continuation-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-http-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-io-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-proxy-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-rewrite-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-security-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-server-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-servlet-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-util-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/servlet-api-3.1.0.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/blueprints-license.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.4.0.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataBlueprintsGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEventTransactionalGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataVertex.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/QueryManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/BinderBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IBinder.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IBindingExtractor.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/IPredecessor.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/TraversalDirectionEnum.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/FuzzySSSP.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/EdgeOnlyFilter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/data/smallWeightedGraph.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/data/weightedSmallGraph.png
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3LoadBalancerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll_LBS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_GangliaLBS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_NOP.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_RoundRobin.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/Inet4Address.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterFactoryForConstruct.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterFactoryForSelect.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleWriter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleWriterFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IDoNotJoinService.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataTriplePattern.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataTriplePatternMaterializer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/resources/service-providers/META-INF/services/org.openrdf.query.resultio.TupleQueryResultWriterFactory
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-04.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-04.srx
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/reif/rdr-04.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/LEGAL/apache-commons.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/LEGAL/httpclient-cache.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/LEGAL/httpclient.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/LEGAL/httpcore.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/LEGAL/httpmime.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/LEGAL/jackson-license.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/LEGAL/sesame2.x-license.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/jackson-core-2.2.3.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/HostScore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/ServiceScore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/NOPLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/RoundRobinLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/DefaultHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/HostTable.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/IHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/LoadOneHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/NOPHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/TestPaths.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/forward.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths1.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths1.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths2.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths3.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths3.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths5.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/reverse.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/sssp.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/sssp.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/provenance01.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sids.ttl
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/favicon.ico
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/images/
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/images/logo.png
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/indexLBS.html
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/vendor/
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/vendor/jquery.hotkeys.js
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/vendor/jquery.min.js
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/new.html
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/old.h...
[truncated message content] |
|
From: <tho...@us...> - 2014-05-02 16:52:47
|
Revision: 8166
http://sourceforge.net/p/bigdata/code/8166
Author: thompsonbry
Date: 2014-05-02 16:52:45 +0000 (Fri, 02 May 2014)
Log Message:
-----------
Refactoring of the HA Load Balancer to expose an interface that can be used by an application to take over the rewrite of the Request-URI when the request will be proxied to another service. See the new IHARequestURIRewriter interface and the new REWRITER init-param for the HALoadBalancerServlet.
See #624 (HA LBS)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/ServiceScore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/NOPLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/RoundRobinLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaLBSPolicy.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DefaultHARequestURIRewriter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHAPolicyLifeCycle.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHARequestURIRewriter.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-02 14:59:01 UTC (rev 8165)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-02 16:52:45 UTC (rev 8166)
@@ -756,7 +756,20 @@
final Quorum<HAGlue, QuorumService<HAGlue>> quorum = (Quorum) new ZKQuorumImpl<HAGlue, HAQuorumService<HAGlue, HAJournal>>(
replicationFactor);
- // The HAJournal.
+ /**
+ * The HAJournal.
+ *
+ * FIXME This step can block for a long time if we have a lot of
+ * HALogs to scan. While it blocks, the REST API (including the LBS)
+ * is down. This means that client requests to the service end point
+ * can not be proxied to a service that is online. The problem is
+ * the interaction with the BigdataRDFServletContextListener which
+ * needs to (a) set the IIndexManager on the ServletContext; and (b)
+ * initiate the default KB create (if it is the quorum leader).
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/775" > HAJournal
+ * start() (optimization) </a>
+ */
this.journal = newHAJournal(this, config, quorum);
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config 2014-05-02 14:59:01 UTC (rev 8165)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config 2014-05-02 16:52:45 UTC (rev 8166)
@@ -1,22 +1,6 @@
-/* Zookeeper client only configuration.
+/*
+ * Zookeeper client configuration.
*/
-import java.io.File;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.util.UUID;
-
-import com.bigdata.util.NV;
-import com.bigdata.util.config.NicUtil;
-import com.bigdata.journal.Options;
-import com.bigdata.journal.BufferMode;
-import com.bigdata.journal.jini.ha.HAJournal;
-import com.bigdata.jini.lookup.entry.*;
-import com.bigdata.service.IBigdataClient;
-import com.bigdata.service.AbstractTransactionService;
-import com.bigdata.service.jini.*;
-import com.bigdata.service.jini.lookup.DataServiceFilter;
-import com.bigdata.service.jini.master.ServicesTemplate;
-import com.bigdata.jini.start.config.*;
import com.bigdata.jini.util.ConfigMath;
import org.apache.zookeeper.ZooDefs;
@@ -30,16 +14,6 @@
private static fedname = "benchmark";
- /* The logical service identifier shared by all members of the quorum.
- *
- * Note: The test fixture ignores this value. For the avoidance of
- * doubt, the value is commented out.
- */
- //private static logicalServiceId = "CI-HAJournal-1";
-
- // zookeeper
- static private sessionTimeout = (int)ConfigMath.s2ms(20);
-
}
/*
@@ -53,36 +27,16 @@
/* A comma separated list of host:port pairs, where the port is
* the CLIENT port for the zookeeper server instance.
*/
- // standalone.
servers = "localhost:2081";
- // ensemble
-// servers = bigdata.zoo1+":2181"
-// + ","+bigdata.zoo2+":2181"
-// + ","+bigdata.zoo3+":2181"
-// ;
/* Session timeout (optional). */
- sessionTimeout = bigdata.sessionTimeout;
+ sessionTimeout = (int)ConfigMath.s2ms(20);
- /*
- * ACL for the zookeeper nodes created by the bigdata federation.
- *
- * Note: zookeeper ACLs are not transmitted over secure channels
- * and are placed into plain text Configuration files by the
- * ServicesManagerServer.
- */
+ // Zookeeper ACLs.
acl = new ACL[] {
new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone"))
};
- /*
- * Note: Normally on the HAJournalServer component. Hacked in the test
- * suite setup to look at the ZooKeeper component instead.
- */
-
- logicalServiceId = bigdata.logicalServiceId;
-
- replicationFactor = bigdata.replicationFactor;
}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DefaultHARequestURIRewriter.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DefaultHARequestURIRewriter.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DefaultHARequestURIRewriter.java 2014-05-02 16:52:45 UTC (rev 8166)
@@ -0,0 +1,85 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.rdf.sail.webapp;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+
+import com.bigdata.journal.IIndexManager;
+
+/**
+ * Default implementation.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class DefaultHARequestURIRewriter implements IHARequestURIRewriter {
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation is a NOP.
+ */
+ @Override
+ public void init(ServletConfig servletConfig, IIndexManager indexManager)
+ throws ServletException {
+
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation is a NOP.
+ */
+ @Override
+ public void destroy() {
+
+ }
+
+ @Override
+ public StringBuilder rewriteURI(final boolean isLeaderRequest,
+ final String full_prefix, final String originalRequestURL,
+ final String proxyToRequestURL, final HttpServletRequest request) {
+
+ final StringBuilder uri = new StringBuilder(proxyToRequestURL);
+
+ if (proxyToRequestURL.endsWith("/"))
+ uri.setLength(uri.length() - 1);
+
+ final String rest = originalRequestURL.substring(full_prefix.length());
+
+ if (!rest.startsWith("/"))
+ uri.append("/");
+
+ uri.append(rest);
+
+ final String query = request.getQueryString();
+
+ if (query != null)
+ uri.append("?").append(query);
+
+ return uri;
+
+ }
+
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-05-02 14:59:01 UTC (rev 8165)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-05-02 16:52:45 UTC (rev 8166)
@@ -92,11 +92,6 @@
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
*
* @see <a href="http://trac.bigdata.com/ticket/624"> HA Load Balancer </a>
- *
- * TODO If the target service winds up not joined with the met quorum by
- * the time we get there, what should it do? Report an error since we are
- * already on its internal interface? Will this servlet see that error? If
- * it does, should it handle it?
*/
public class HALoadBalancerServlet extends ProxyServlet {
@@ -110,10 +105,6 @@
public interface InitParams {
-// String ENABLED = "enabled";
-//
-// String DEFAULT_ENABLED = "false";
-
/*
* Note: /bigdata/LBS is now a base prefix. There are fully qualified
* prefix values of /bigdata/LBS/leader and /bigdata/LBS/read. This is
@@ -146,50 +137,78 @@
*/
String DEFAULT_POLICY = NOPLBSPolicy.class.getName();
+ /**
+ * The fully qualified class name of an {@link IHARequestURIRewriter}
+ * (optional - the default is {@value #DEFAULT_REWRITER}). This must be
+ * an instance of {@link IHARequestURIRewriter}. This may be used to
+ * impose application specific Request-URI rewrite semantics when a
+ * request will be proxied to another service.
+ */
+ String REWRITER = "rewriter";
+
+ String DEFAULT_REWRITER = DefaultHARequestURIRewriter.class.getName();
+
}
public HALoadBalancerServlet() {
+
super();
+
}
-// /**
-// * This servlet request attribute is used to mark a request as either an
-// * update or a read-only operation.
-// */
-// protected static final String ATTR_LBS_UPDATE_REQUEST = "lbs-update-request";
-
/**
- * The initial prefix that will be stripped off by the load balancer.
+ * The initial prefix and is formed as
+ *
+ * <pre>
+ * Context - Path / LBS
+ * </pre>
* <p>
* Note: This is set by {@link #init()}. It must not be <code>null</code>.
- * The load balancer relies on the prefix to rewrite the requestURL when the
- * {@link IHALoadBalancerPolicy} is disabled in order to forward the request
- * to the local service.
+ * The load balancer relies on the prefix to rewrite the Request-URI: (a)
+ * when it is disabled (the request will be forwarded to a local service;
+ * and (b) when the request is proxied to a remote service.
*/
private String prefix;
/**
+ * The URI path component that follows the servlet context-path to identify
+ * a request that will be handled by the load balancer component.
+ */
+ private static final String PATH_LBS = "/LBS";
+
+ /**
+ * The URI path component that follows the {@link #prefix} to identify a
+ * request that will target the quorum leader.
+ */
+ private static final String PATH_LEADER = "/leader";
+
+ /**
+ * The URI path component that follows the {@link #prefix} to identify a
+ * request that should be load balanced over the leader + followers.
+ */
+ private static final String PATH_READ = "/read";
+
+ /**
* The configured {@link IHALoadBalancerPolicy} and <code>null</code> iff
* the load balancer is disabled. If the LBS is not enabled, then it will
* strip its prefix from the URL requestURI and do a servlet forward to the
* resulting requestURI. This allows the webapp to start even if the LBS is
* not correctly configured.
*
- * TODO Since we are allowing programatic change of the policy, it would be
- * a good idea to make that change atomic with respect to any specific
- * request and to make the destroy of the policy something that occurs once
- * any in flight request has been handled (there is more than one place
- * where the policy is checked in the code). The atomic change might be
- * accomplished by attaching the policy to the request as an attribute. The
- * destroy could be achieved by reference counts for the #of in flight
- * requests flowing through a policy. The request attribute and reference
- * count could be handled together through handshaking with the policy when
- * attaching it as a request attribute in
- * {@link #service(HttpServletRequest, HttpServletResponse)}.
+ * @see InitParams#POLICY
*/
private final AtomicReference<IHALoadBalancerPolicy> policyRef = new AtomicReference<IHALoadBalancerPolicy>();
/**
+ * The {@link IHARequestURIRewriter} that rewrites the original Request-URI
+ * into a Request-URI for the target service to which the request will be
+ * proxied.
+ *
+ * @see InitParams#REWRITER
+ */
+ private final AtomicReference<IHARequestURIRewriter> rewriterRef = new AtomicReference<IHARequestURIRewriter>();
+
+ /**
* Change the {@link IHALoadBalancerPolicy} associated with this instance of
* this servlet. The new policy will be installed iff it can be initialized
* successfully. The old policy will be destroyed iff the new policy is
@@ -205,7 +224,61 @@
if (log.isInfoEnabled())
log.info("newValue=" + newValue);
+
+ setHAPolicy(newValue, policyRef);
+
+ }
+
+ /**
+ * Change the {@link IHARequestURIRewriter} associated with this instance of
+ * this servlet. The new policy will be installed iff it can be initialized
+ * successfully. The old policy will be destroyed iff the new policy is
+ * successfully installed.
+ *
+ * @param newValue
+ * The new value (required).
+ */
+ public void setRewriter(final IHARequestURIRewriter newValue) {
+ if (newValue == null)
+ throw new IllegalArgumentException();
+
+ if (log.isInfoEnabled())
+ log.info("newValue=" + newValue);
+
+ setHAPolicy(newValue, rewriterRef);
+
+ }
+
+ /**
+ * Change the {@link IHAPolicyLifeCycle} associated with this instance of
+ * this servlet. The new policy will be installed iff it can be initialized
+ * successfully. The old policy will be destroyed iff the new policy is
+ * successfully installed.
+ *
+ * @param newValue
+ * The new value (required).
+ * @param ref
+ * The {@link AtomicReference} object that holds the current
+ * value of the policy.
+ *
+ * TODO Since we are allowing programatic change of the policy,
+ * it would be a good idea to make that change atomic with
+ * respect to any specific request and to make the destroy of the
+ * policy something that occurs once any in flight request has
+ * been handled (there is more than one place where the policy is
+ * checked in the code). The atomic change might be accomplished
+ * by attaching the policy to the request as an attribute. The
+ * destroy could be achieved by reference counts for the #of in
+ * flight requests flowing through a policy. The request
+ * attribute and reference count could be handled together
+ * through handshaking with the policy when attaching it as a
+ * request attribute in
+ * {@link #service(HttpServletRequest, HttpServletResponse)}.
+ */
+ private <T extends IHAPolicyLifeCycle> void setHAPolicy(final T newValue,
+ final AtomicReference<T> ref) {
+
final ServletConfig servletConfig = getServletConfig();
final ServletContext servletContext = servletConfig.getServletContext();
@@ -252,8 +325,7 @@
}
// Install the new policy.
- final IHALoadBalancerPolicy oldValue = this.policyRef
- .getAndSet(newValue);
+ final T oldValue = ref.getAndSet(newValue);
if (oldValue != null && oldValue != newValue) {
@@ -261,7 +333,7 @@
oldValue.destroy();
}
-
+
}
/**
@@ -280,7 +352,7 @@
// // Get the as-configured prefix to be stripped from requests.
// prefix = servletConfig.getInitParameter(InitParams.PREFIX);
- prefix = BigdataStatics.getContextPath() + "/LBS";
+ prefix = BigdataStatics.getContextPath() + PATH_LBS;
final ServletContext servletContext = servletConfig.getServletContext();
@@ -293,32 +365,26 @@
return;
}
- /*
- * Setup a fall back policy. This policy will strip off the configured
- * prefix from the requestURL and forward the request to the local
- * service. If we can not establish the as-configured policy, then
- * the servlet will run with this fall back policy.
- */
+ {
+ // Get the as-configured policy.
+ final IHALoadBalancerPolicy policy = newInstance(servletConfig,
+ IHALoadBalancerPolicy.class, InitParams.POLICY,
+ InitParams.DEFAULT_POLICY);
+ // Set the as-configured policy.
+ setPolicy(policy);
+
+ }
{
-
- final IHALoadBalancerPolicy defaultPolicy = new NOPLBSPolicy();
-
- // Initialize the fallback policy.
- defaultPolicy.init(servletConfig, indexManager);
- policyRef.set(defaultPolicy);
+ final IHARequestURIRewriter rewriter = newInstance(servletConfig,
+ IHARequestURIRewriter.class, InitParams.REWRITER,
+ InitParams.DEFAULT_REWRITER);
+ setRewriter(rewriter);
+
}
- // Get the as-configured policy.
- IHALoadBalancerPolicy policy = newInstance(servletConfig,
- IHALoadBalancerPolicy.class, InitParams.POLICY,
- InitParams.DEFAULT_POLICY);
-
- // Set the as-configured policy.
- setPolicy(policy);
-
servletContext.setAttribute(BigdataServlet.ATTRIBUTE_LBS_PREFIX,
prefix);
@@ -326,7 +392,8 @@
if (log.isInfoEnabled())
log.info(servletConfig.getServletName() + " @ " + prefix
- + " :: policy=" + policy);
+ + " :: policy=" + policyRef.get() + ", rewriter="
+ + rewriterRef.get());
}
@@ -441,15 +508,32 @@
removeServlet(getServletContext(), this/* servlet */);
- final IHALoadBalancerPolicy policy = policyRef
- .getAndSet(null/* newValue */);
+ {
- if (policy != null) {
+ final IHALoadBalancerPolicy policy = policyRef
+ .getAndSet(null/* newValue */);
- policy.destroy();
+ if (policy != null) {
+ policy.destroy();
+
+ }
+
}
+ {
+
+ final IHARequestURIRewriter rewriter = rewriterRef
+ .getAndSet(null/* newValue */);
+
+ if (rewriter != null) {
+
+ rewriter.destroy();
+
+ }
+
+ }
+
prefix = null;
getServletContext().setAttribute(BigdataServlet.ATTRIBUTE_LBS_PREFIX,
@@ -592,8 +676,9 @@
* servlet in this servlet container rather than proxying it to either
* itself or another service.
*
- * FIXME This does too much work if the request is for the leader and
- * this service is not the leader. Look at it again under a debugger.
+ * TODO This does too much work if the request is for the leader and
+ * this service is not the leader. Look at it again under a debugger
+ * and optimize the code paths.
*/
if (policy.service(isLeaderRequest, request, response)) {
@@ -710,8 +795,12 @@
* <code>true</code> iff this is a leader request.
* @param prefix
* the base prefix (typically <code>/bigdata/LBS</code>)
- *
+ *
* @return The full prefix.
+ *
+ * TODO This may need to configurable. It is currently static since
+ * {@link #forwardToThisService(boolean, HttpServletRequest, HttpServletResponse)}
+ * is static.
*/
private static String getFullPrefix(final boolean isLeaderRequest,
final String prefix) {
@@ -720,7 +809,7 @@
: prefix + "/read";
return full_prefix;
-
+
}
/**
@@ -738,55 +827,72 @@
return null;
}
- final String path = request.getRequestURI();
- if (!path.startsWith(prefix))
+ final String originalRequestURI = request.getRequestURI();
+
+ if (!originalRequestURI.startsWith(prefix))
return null;
final Boolean isLeaderRequest = isLeaderRequest(request);
+
if (isLeaderRequest == null) {
// Neither /LBS/leader -nor- /LBS/read.
return null;
}
- final String proxyTo;
+
+ final String proxyToRequestURI;
+
if(isLeaderRequest) {
// Proxy to leader.
- proxyTo = policy.getLeaderURL(request);
+ proxyToRequestURI = policy.getLeaderURL(request);
} else {
// Proxy to any joined service.
- proxyTo = policy.getReaderURL(request);
+ proxyToRequestURI = policy.getReaderURL(request);
}
- if (proxyTo == null) {
+
+ if (proxyToRequestURI == null) {
// Could not rewrite.
return null;
}
- final StringBuilder uri = new StringBuilder(proxyTo);
- if (proxyTo.endsWith("/"))
- uri.setLength(uri.length() - 1);
+
// the full LBS prefix (includes /leader or /read).
final String full_prefix = getFullPrefix(isLeaderRequest, prefix);
- final String rest = path.substring(full_prefix.length());
- if (!rest.startsWith("/"))
- uri.append("/");
- uri.append(rest);
- final String query = request.getQueryString();
- if (query != null)
- uri.append("?").append(query);
+
+ // The configured Request-URL rewriter.
+ final IHARequestURIRewriter rewriter = rewriterRef.get();
+
+ if (rewriter == null) {
+ // Could not rewrite.
+ log.warn("No rewriter: requestURI="+originalRequestURI);
+ return null;
+ }
+
+ // Re-write requestURL.
+ final StringBuilder uri = rewriter.rewriteURI(//
+ isLeaderRequest,// iff request for the leader
+ full_prefix, //
+ originalRequestURI,// old
+ proxyToRequestURI, // new
+ request // request
+ );
+
+ // Normalize the request.
final URI rewrittenURI = URI.create(uri.toString()).normalize();
if (!validateDestination(rewrittenURI.getHost(), rewrittenURI.getPort()))
return null;
if (log.isInfoEnabled())
- log.info("rewrote: " + path + " => " + rewrittenURI);
+ log.info("rewrote: " + originalRequestURI + " => " + rewrittenURI);
return rewrittenURI;
}
/**
- * TODO This offers an opportunity to handle a rewrite failure. It could be
- * used to provide a default status code (e.g., 404 versus forbidden) or to
- * forward the request to this server rather than proxying to another
- * server.
+ * Note: This offers an opportunity to handle a failure where we were unable
+ * to rewrite the request to some service, e.g., because the quorum is not
+ * met. The implementation is overridden to forward the request to the local
+ * service. The local service will then generate an appropriate HTTP error
+ * response.
*/
@Override
protected void onRewriteFailed(final HttpServletRequest request,
@@ -846,13 +952,13 @@
final String rest = requestURI.substring(indexLBS + prefix.length());
- if (rest.startsWith("/leader")) {
+ if (rest.startsWith(PATH_LEADER)) {
return Boolean.TRUE;
}
- if (rest.startsWith("/read")) {
+ if (rest.startsWith(PATH_READ)) {
return Boolean.FALSE;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java 2014-05-02 14:59:01 UTC (rev 8165)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java 2014-05-02 16:52:45 UTC (rev 8166)
@@ -24,13 +24,10 @@
import java.io.IOException;
-import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
-import com.bigdata.journal.IIndexManager;
-
/**
* Load balancer policy interface.
*
@@ -39,24 +36,9 @@
* @see HALoadBalancerServlet
* @see <a href="http://trac.bigdata.com/ticket/624">HA Load Balancer</a>
*/
-public interface IHALoadBalancerPolicy {
+public interface IHALoadBalancerPolicy extends IHAPolicyLifeCycle {
/**
- * Initialize the load balancer policy.
- *
- * @param servletConfig
- * @param indexManager
- */
- void init(ServletConfig servletConfig, IIndexManager indexManager)
- throws ServletException;
-
- /**
- * Destroy the load balancer policy (stop any asynchronous processing,
- * release any resources).
- */
- void destroy();
-
- /**
* Invoked for each request. If the response is not committed, then it will
* be handled by the {@link HALoadBalancerServlet}.
*
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHAPolicyLifeCycle.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHAPolicyLifeCycle.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHAPolicyLifeCycle.java 2014-05-02 16:52:45 UTC (rev 8166)
@@ -0,0 +1,47 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.rdf.sail.webapp;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+
+import com.bigdata.journal.IIndexManager;
+
+public interface IHAPolicyLifeCycle {
+
+ /**
+ * Initialize the policy.
+ *
+ * @param servletConfig
+ * @param indexManager
+ */
+ void init(ServletConfig servletConfig, IIndexManager indexManager)
+ throws ServletException;
+
+ /**
+ * Destroy the policy (stop any asynchronous processing, release any
+ * resources).
+ */
+ void destroy();
+
+}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHARequestURIRewriter.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHARequestURIRewriter.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHARequestURIRewriter.java 2014-05-02 16:52:45 UTC (rev 8166)
@@ -0,0 +1,95 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi......
[truncated message content] |
|
From: <jer...@us...> - 2014-05-03 23:00:09
|
Revision: 8176
http://sourceforge.net/p/bigdata/code/8176
Author: jeremy_carroll
Date: 2014-05-03 23:00:06 +0000 (Sat, 03 May 2014)
Log Message:
-----------
New tests inspired by trac904 - unfortunately all passing
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestAll.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTExistsAndJoinOrderByTypeOptimizers.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java 2014-05-03 22:59:31 UTC (rev 8175)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java 2014-05-03 23:00:06 UTC (rev 8176)
@@ -363,7 +363,7 @@
if(v1 == v2)
continue;
- if (v1 != null && v2 == null)
+ if (v1 == null || v2 == null)
return false;
if (v1.getClass().isArray()) {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java 2014-05-03 22:59:31 UTC (rev 8175)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java 2014-05-03 23:00:06 UTC (rev 8176)
@@ -23,25 +23,29 @@
*/
package com.bigdata.rdf.sparql.ast.optimizers;
-import java.util.HashMap;
-import java.util.Map;
import org.openrdf.model.impl.URIImpl;
import org.openrdf.query.algebra.StatementPattern.Scope;
import com.bigdata.bop.IBindingSet;
+import com.bigdata.bop.IValueExpression;
+import com.bigdata.bop.IVariable;
import com.bigdata.bop.ModifiableBOpBase;
+import com.bigdata.journal.ITx;
import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.internal.constraints.IsBoundBOp;
+import com.bigdata.rdf.internal.constraints.OrBOp;
import com.bigdata.rdf.sparql.ast.ASTBase;
import com.bigdata.rdf.sparql.ast.ASTContainer;
import com.bigdata.rdf.sparql.ast.AbstractASTEvaluationTestCase;
import com.bigdata.rdf.sparql.ast.ArbitraryLengthPathNode;
import com.bigdata.rdf.sparql.ast.AssignmentNode;
import com.bigdata.rdf.sparql.ast.ConstantNode;
+import com.bigdata.rdf.sparql.ast.ExistsNode;
import com.bigdata.rdf.sparql.ast.FilterNode;
import com.bigdata.rdf.sparql.ast.FunctionNode;
import com.bigdata.rdf.sparql.ast.FunctionRegistry;
+import com.bigdata.rdf.sparql.ast.GlobalAnnotations;
import com.bigdata.rdf.sparql.ast.GraphPatternGroup;
import com.bigdata.rdf.sparql.ast.GroupMemberNodeBase;
import com.bigdata.rdf.sparql.ast.IGroupMemberNode;
@@ -50,6 +54,7 @@
import com.bigdata.rdf.sparql.ast.JoinGroupNode;
import com.bigdata.rdf.sparql.ast.NamedSubqueryInclude;
import com.bigdata.rdf.sparql.ast.NamedSubqueryRoot;
+import com.bigdata.rdf.sparql.ast.NotExistsNode;
import com.bigdata.rdf.sparql.ast.PathNode;
import com.bigdata.rdf.sparql.ast.ValueExpressionNode;
import com.bigdata.rdf.sparql.ast.PathNode.*;
@@ -60,11 +65,14 @@
import com.bigdata.rdf.sparql.ast.QueryRoot;
import com.bigdata.rdf.sparql.ast.QueryType;
import com.bigdata.rdf.sparql.ast.StatementPatternNode;
+import com.bigdata.rdf.sparql.ast.SubqueryRoot;
import com.bigdata.rdf.sparql.ast.TermNode;
import com.bigdata.rdf.sparql.ast.UnionNode;
import com.bigdata.rdf.sparql.ast.VarNode;
import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext;
+import com.bigdata.rdf.sparql.ast.eval.AST2BOpUtility;
import com.bigdata.rdf.sparql.ast.service.ServiceNode;
+import com.bigdata.rdf.spo.SPOKeyOrder;
public abstract class AbstractOptimizerTestCase extends AbstractASTEvaluationTestCase {
@@ -177,6 +185,7 @@
private VarNode rightVar;
private VarNode leftVar;
int varCount = 0;
+ private GlobalAnnotations globals = new GlobalAnnotations(getName(), ITx.READ_COMMITTED);
private IV iv(String id) {
return makeIV(new URIImpl("http://example/" + id));
@@ -216,7 +225,20 @@
HelperFlag... flags) {
return select(new VarNode[] { varNode }, where, flags);
}
+
+ protected SubqueryRoot ask(VarNode varNode, JoinGroupNode where) {
+ final SubqueryRoot rslt = new SubqueryRoot(QueryType.ASK);
+ final ProjectionNode projection = new ProjectionNode();
+ varNode.setAnonymous(true);
+ rslt.setProjection(projection);
+ projection.addProjectionExpression(new AssignmentNode(varNode, varNode));
+ rslt.setWhereClause(where);
+ rslt.setAskVar(toValueExpression(varNode));
+
+ return rslt;
+ }
+
protected NamedSubqueryRoot namedSubQuery(String name, VarNode varNode,
JoinGroupNode where) {
final NamedSubqueryRoot namedSubquery = new NamedSubqueryRoot(
@@ -439,6 +461,31 @@
protected AssignmentNode bind(IValueExpressionNode valueNode, VarNode varNode) {
return new AssignmentNode(varNode, valueNode);
}
+
+ protected FunctionNode or(ValueExpressionNode v1, ValueExpressionNode v2) {
+
+ FunctionNode rslt = FunctionNode.OR(v1, v2);
+ rslt.setValueExpression(new OrBOp(v1.getValueExpression(),v2.getValueExpression()));
+ return rslt;
+ }
+
+ protected ExistsNode exists(VarNode v, GraphPatternGroup<IGroupMemberNode> jg) {
+ v.setAnonymous(true);
+ ExistsNode existsNode = new ExistsNode(v, jg);
+ existsNode.setValueExpression(toValueExpression(v));
+ return existsNode;
+ }
+
+ private IVariable<? extends IV> toValueExpression(VarNode v) {
+ return (IVariable<? extends IV>) AST2BOpUtility.toVE(globals, v);
+ }
+
+ private IValueExpression<? extends IV> toValueExpression(FunctionNode n) {
+ return AST2BOpUtility.toVE(globals, n);
+ }
+ protected NotExistsNode notExists(VarNode v, GraphPatternGroup<IGroupMemberNode> jg) {
+ return new NotExistsNode(v, jg);
+ }
}
public AbstractOptimizerTestCase(String name) {
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTExistsAndJoinOrderByTypeOptimizers.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTExistsAndJoinOrderByTypeOptimizers.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTExistsAndJoinOrderByTypeOptimizers.java 2014-05-03 23:00:06 UTC (rev 8176)
@@ -0,0 +1,164 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on May 3, 2014
+ */
+
+package com.bigdata.rdf.sparql.ast.optimizers;
+
+import com.bigdata.rdf.sparql.ast.ArbitraryLengthPathNode;
+
+/**
+ * Test suite for {@link ASTUnionFiltersOptimizer}.
+ *
+ * @author Jeremy Carroll
+ */
+public class TestASTExistsAndJoinOrderByTypeOptimizers extends AbstractOptimizerTestCase {
+
+ /**
+ *
+ */
+ public TestASTExistsAndJoinOrderByTypeOptimizers() {
+ }
+
+ /**
+ * @param name
+ */
+ public TestASTExistsAndJoinOrderByTypeOptimizers(String name) {
+ super(name);
+ }
+ @Override
+ IASTOptimizer newOptimizer() {
+ return new ASTOptimizerList(new ASTExistsOptimizer(),
+ new ASTJoinOrderByTypeOptimizer());
+ }
+
+ public void testSimpleExists() {
+ new Helper(){{
+ given = select( varNode(w),
+ where ( joinGroupNode(
+ filter(
+ exists(varNode(y), joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(b),varNode(w))))
+ )
+ ) ) );
+
+ expected = select( varNode(w),
+ where (joinGroupNode(
+ ask(varNode(y),
+ joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(b),varNode(w))
+ ) ),
+ filter(exists(varNode(y), joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(b),varNode(w))))
+ ) )
+ ) );
+
+ }}.test();
+
+ }
+ public void testOrExists() {
+ new Helper(){{
+ given = select( varNode(w),
+ where ( joinGroupNode(
+ filter(
+ or (
+ exists(varNode(y), joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(b),varNode(w)))),
+ exists(varNode(z), joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(c),varNode(w)))))
+ )
+ ) ) );
+
+ expected = select( varNode(w),
+ where (joinGroupNode(
+ ask(varNode(y),
+ joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(b),varNode(w))
+ ) ),
+ ask(varNode(z),
+ joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(c),varNode(w))
+ ) ),
+ filter(
+ or (
+ exists(varNode(y), joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(b),varNode(w)))),
+ exists(varNode(z), joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(c),varNode(w)))))
+ )
+ ) )
+ );
+
+ }}.test();
+
+ }
+ public void testOrWithPropertyPath() {
+ new Helper(){{
+ given = select( varNode(w),
+ where ( joinGroupNode(
+ filter(
+ or (
+ exists(varNode(y), joinGroupNode(
+ arbitartyLengthPropertyPath(varNode(w), constantNode(b), HelperFlag.ONE_OR_MORE,
+ joinGroupNode( statementPatternNode(leftVar(), constantNode(b), rightVar()) ) )
+
+ )),
+ exists(varNode(z), joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(c),varNode(w)))))
+ )
+ ) ) );
+
+ varCount = 0;
+ final ArbitraryLengthPathNode alpp1 = arbitartyLengthPropertyPath(varNode(w), constantNode(b), HelperFlag.ONE_OR_MORE,
+ joinGroupNode( statementPatternNode(leftVar(), constantNode(b), rightVar()) ) );
+ varCount = 0;
+ final ArbitraryLengthPathNode alpp2 = arbitartyLengthPropertyPath(varNode(w), constantNode(b), HelperFlag.ONE_OR_MORE,
+ joinGroupNode( statementPatternNode(leftVar(), constantNode(b), rightVar()) ) );
+ expected = select( varNode(w),
+ where (joinGroupNode(
+ ask(varNode(y),
+ joinGroupNode(
+ alpp1
+ ) ),
+ ask(varNode(z),
+ joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(c),varNode(w))
+ ) ),
+ filter(
+ or (
+ exists(varNode(y), joinGroupNode(
+ alpp2
+
+ )),
+ exists(varNode(z), joinGroupNode(
+ statementPatternNode(constantNode(a),constantNode(c),varNode(w)))))
+ )
+ ) )
+ );
+
+ }}.test();
+ }
+
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestAll.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestAll.java 2014-05-03 22:59:31 UTC (rev 8175)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestAll.java 2014-05-03 23:00:06 UTC (rev 8176)
@@ -132,7 +132,10 @@
// Unit tests for optimizer which attaches join filters to SPs.
suite.addTestSuite(TestASTAttachJoinFiltersOptimizer.class);
+
+ suite.addTestSuite(TestASTExistsAndJoinOrderByTypeOptimizers.class);
+
// Unit tests for optimizer which attaches join filters to SPs.
suite.addTestSuite(TestASTRangeOptimizer.class);
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-04 13:04:31
|
Revision: 8177
http://sourceforge.net/p/bigdata/code/8177
Author: thompsonbry
Date: 2014-05-04 13:04:27 +0000 (Sun, 04 May 2014)
Log Message:
-----------
Changed the default LBS policy from NOP to round-robin for performance testing on HA3 cluster. I will also need to test the ganglia LBS policy.
Added ability to view the current LBS policy to the HA status page.
See #624 (HA LBS)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-03 23:00:06 UTC (rev 8176)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-04 13:04:27 UTC (rev 8177)
@@ -4649,7 +4649,7 @@
if (log.isInfoEnabled())
log.info("Will set LBS: wac=" + wac + ", policy: " + policy);
- HALoadBalancerServlet.setPolicy(wac.getServletContext(), policy);
+ HALoadBalancerServlet.setLBSPolicy(wac.getServletContext(), policy);
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-05-03 23:00:06 UTC (rev 8176)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-05-04 13:04:27 UTC (rev 8177)
@@ -217,7 +217,7 @@
* @param newValue
* The new value (required).
*/
- public void setPolicy(final IHALoadBalancerPolicy newValue) {
+ public void setLBSPolicy(final IHALoadBalancerPolicy newValue) {
if (newValue == null)
throw new IllegalArgumentException();
@@ -230,6 +230,15 @@
}
/**
+ * Return the current {@link IHALoadBalancerPolicy}.
+ */
+ public IHALoadBalancerPolicy getLBSPolicy() {
+
+ return policyRef.get();
+
+ }
+
+ /**
* Change the {@link IHARequestURIRewriter} associated with this instance of
* this servlet. The new policy will be installed iff it can be initialized
* successfully. The old policy will be destroyed iff the new policy is
@@ -372,7 +381,7 @@
InitParams.DEFAULT_POLICY);
// Set the as-configured policy.
- setPolicy(policy);
+ setLBSPolicy(policy);
}
{
@@ -478,9 +487,25 @@
}
- public static void setPolicy(final ServletContext servletContext,
- final IHALoadBalancerPolicy policy) {
+ /**
+ * Set the current {@link IHALoadBalancerPolicy} for all
+ * {@link HALoadBalancerServlet} instances for the caller specified
+ * {@link ServletContext}.
+ *
+ * @param servletContext
+ * The {@link ServletContext}.
+ * @param newValue
+ * The new {@link IHALoadBalancerPolicy}.
+ *
+ * @throws IllegalArgumentException
+ * if the new policy is <code>null</code>.
+ */
+ public static void setLBSPolicy(final ServletContext servletContext,
+ final IHALoadBalancerPolicy newValue) {
+ if (newValue == null)
+ throw new IllegalArgumentException();
+
final HALoadBalancerServlet[] servlets = getServlets(servletContext);
if (servlets == null || servlets.length == 0) {
@@ -492,11 +517,45 @@
for (HALoadBalancerServlet servlet : servlets) {
- servlet.setPolicy(policy);
+ servlet.setLBSPolicy(newValue);
}
}
+
+ /**
+ * Return the {@link IHALoadBalancerPolicy}s that are in force for the
+ * active {@link HALoadBalancerServlet} instances.
+ *
+ * @param servletContext
+ * The {@link ServletContext}.
+ *
+ * @return The {@link IHALoadBalancerPolicy}[] -or- <code>null</code> if
+ * there are no {@link HALoadBalancerServlet}s.
+ */
+ public static IHALoadBalancerPolicy[] getLBSPolicy(
+ final ServletContext servletContext) {
+
+ final HALoadBalancerServlet[] servlets = getServlets(servletContext);
+
+ if (servlets == null || servlets.length == 0) {
+
+ // None running.
+ return null;
+
+ }
+
+ final IHALoadBalancerPolicy[] a = new IHALoadBalancerPolicy[servlets.length];
+
+ for (int i = 0; i < servlets.length; i++) {
+
+ a[i] = servlets[i].getLBSPolicy();
+
+ }
+
+ return a;
+
+ }
/**
* {@inheritDoc}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-05-03 23:00:06 UTC (rev 8176)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-05-04 13:04:27 UTC (rev 8177)
@@ -30,6 +30,7 @@
import java.security.DigestException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;
import java.util.Iterator;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
@@ -283,7 +284,17 @@
p.text("Service: restorePolicy="
+ journal.getSnapshotManager().getRestorePolicy())
.node("br").close();
-
+
+ // LBS policy
+ {
+
+ final IHALoadBalancerPolicy[] a = HALoadBalancerServlet
+ .getLBSPolicy(req.getServletContext());
+
+ p.text("Service: LBSPolicy="
+ + (a == null ? "N/A" : Arrays.toString(a)))
+ .node("br").close();
+ }
// if(true) {
// /*
// * HABackup: disable this code block. It is for
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml 2014-05-03 23:00:06 UTC (rev 8176)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml 2014-05-04 13:04:27 UTC (rev 8177)
@@ -137,6 +137,31 @@
<servlet-class>com.bigdata.rdf.sail.webapp.HALoadBalancerServlet</servlet-class>
<load-on-startup>1</load-on-startup>
<async-supported>true</async-supported>
+ <init-param>
+ <param-name>policy</param-name>
+ <param-value>com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy</param-value>
+ <description>
+ The load balancer policy. This must be an instance of the
+ IHALoadBalancerPolicy interface. A default policy (NOPLBSPolicy) is
+ used when no value is specified.
+
+ The policies differ ONLY in how they handle READ requests. All policies
+ proxy updates to the leader. If you do not want update proxying, then
+ use a URL that does not address the HALoadBalancerServlet.
+
+ The following policies are pre-defined:
+
+ com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy - Does not load balance read requests.
+
+ com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy - Round robin for read requests.
+
+ com.bigdata.rdf.sail.webapp.lbs.policy.GangliaLBSPolicy - Load based proxying for read requests using ganglia.
+
+ Some policies can be configured using additional init-param elements
+ that they understand. See the javadoc for the individual policies for
+ more information.
+ </description>
+ </init-param>
</servlet>
<servlet-mapping>
<servlet-name>Load Balancer</servlet-name>
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-04 20:35:38
|
Revision: 8184
http://sourceforge.net/p/bigdata/code/8184
Author: thompsonbry
Date: 2014-05-04 20:35:34 +0000 (Sun, 04 May 2014)
Log Message:
-----------
Changed the default policy to the GangliaLBSPolicy for testing.
Added min/max threads environment variables to startHAServices. These are examined by jetty.xml.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml 2014-05-04 19:33:40 UTC (rev 8183)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml 2014-05-04 20:35:34 UTC (rev 8184)
@@ -139,7 +139,7 @@
<async-supported>true</async-supported>
<init-param>
<param-name>policy</param-name>
- <param-value>com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy</param-value>
+ <param-value>com.bigdata.rdf.sail.webapp.lbs.policy.ganglia.GangliaLBSPolicy</param-value>
<description>
The load balancer policy. This must be an instance of the
IHALoadBalancerPolicy interface. A default policy (NOPLBSPolicy) is
@@ -151,15 +151,23 @@
The following policies are pre-defined:
- com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy - Does not load balance read requests.
+ com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy:
- com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy - Round robin for read requests.
+ Does not load balance read requests.
- com.bigdata.rdf.sail.webapp.lbs.policy.GangliaLBSPolicy - Load based proxying for read requests using ganglia.
+ com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy:
- Some policies can be configured using additional init-param elements
- that they understand. See the javadoc for the individual policies for
- more information.
+ Round robin for read requests.
+
+ com.bigdata.rdf.sail.webapp.lbs.policy.ganglia.GangliaLBSPolicy:
+
+ Load based proxying for read requests using ganglia. Either gmond
+ must be installed on each node or the embedded GangliaService must
+ be enabled such that performance metrics are collected and reported.
+
+ Some of these policies can be further configured using additional
+ init-param elements that they understand. See the javadoc for the
+ individual policies for more information.
</description>
</init-param>
</servlet>
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-05-04 19:33:40 UTC (rev 8183)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-05-04 20:35:34 UTC (rev 8184)
@@ -82,6 +82,8 @@
-DZK_SERVERS=${ZK_SERVERS}\
-DHA_PORT=${HA_PORT}\
"-Dcom.bigdata.hostname=${BIGDATA_HOSTNAME}"\
+ "-Djetty.threads.min=${JETTY_THREADS_MIN}"\
+ "-Djetty.threads.max=${JETTY_THREADS_MAX}"\
"-Djetty.port=${JETTY_PORT}"\
"-Djetty.resourceBase=${JETTY_RESOURCE_BASE}"\
"-DJETTY_XML=${JETTY_XML}"\
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <jer...@us...> - 2014-05-04 21:34:03
|
Revision: 8191
http://sourceforge.net/p/bigdata/code/8191
Author: jeremy_carroll
Date: 2014-05-04 21:33:58 +0000 (Sun, 04 May 2014)
Log Message:
-----------
Improve the print out of debug representations - in particular ensure that the content of FILTER EXISTS and FILTER NOT EXISTS are shown prior to the optimizer step that pulls the content out as a subquery.
Also shorten the names of some annotations in some debug representations.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/IValueExpression.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FilterNode.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionNode.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IQueryNode.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IValueExpressionNode.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/SubqueryFunctionNodeBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/TermNode.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ValueExpressionNode.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/TestAST.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOp.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOp.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -211,6 +211,24 @@
*/
boolean isController();
+ /**
+ * The contract of this method at this level is under-specified.
+ * Sub-classes may choose between:
+ *
+ * - return a string representation of the object, similar to the use of {@link #toString()}
+ *
+ * Or:
+ *
+ * - return a pretty-print representation of the object with indent
+ *
+ * Note that the former contract may or may not include recursive descent through a tree-like
+ * object, whereas the latter almost certainly does.
+ *
+ * @param indent
+ * @return
+ */
+ String toString(final int indent);
+
/**
* Interface declaring well known annotations.
* <p>
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -156,9 +156,44 @@
return sb.toString();
}
+
+ /**
+ * Append a name to a string buffer, possibly shortening the name.
+ * The current algorithm for name shortening is to take the end of the name
+ * after the pen-ultimate '.'.
+ * @param sb
+ * @param longishName
+ */
+ protected void shortenName(final StringBuilder sb, final String longishName) {
+ int lastDot = longishName.lastIndexOf('.');
+ if (lastDot != -1) {
+ int lastButOneDot = longishName.lastIndexOf('.', lastDot - 1);
+ sb.append(longishName.substring(lastButOneDot + 1));
+ return;
+ }
+ sb.append(longishName);
+ }
+ /**
+ * Add a string representation of annotations into a string builder.
+ * By default this is a non-recursive operation, however
+ * subclasses may override {@link #annotationValueToString(StringBuilder, BOp, int)}
+ * in order to make this recursive.
+ * @param sb
+ */
protected void annotationsToString(final StringBuilder sb) {
- final Map<String,Object> annotations = annotations();
+ annotationsToString(sb, 0);
+ }
+
+ /**
+ * Add a string representation of annotations into a string builder.
+ * By default this is a non-recursive operation, however
+ * subclasses may override {@link #annotationValueToString(StringBuilder, BOp, int)}
+ * in order to make this recursive.
+ * @param sb
+ */
+ protected void annotationsToString(final StringBuilder sb, final int indent) {
+ final Map<String,Object> annotations = annotations();
if (!annotations.isEmpty()) {
sb.append("[");
boolean first = true;
@@ -169,20 +204,35 @@
sb.append(", ");
final String key = e.getKey();
final Object val = e.getValue();
+ shortenName(sb, key);
+ sb.append("=");
if (val != null && val.getClass().isArray()) {
- sb.append(key + "=" + Arrays.toString((Object[]) val));
+ sb.append(Arrays.toString((Object[]) val));
} else if (key.equals(IPredicate.Annotations.FLAGS)) {
- sb.append(key + "=" + Tuple.flagString((Integer) val));
+ sb.append(Tuple.flagString((Integer) val));
} else if( val instanceof BOp) {
- sb.append(key + "=" + ((BOp) val).toShortString());
+ annotationValueToString(sb, (BOp)val, indent);
} else {
- sb.append(key + "=" + val);
+ sb.append(val);
}
first = false;
}
sb.append("]");
}
- }
+ }
+
+ /**
+ * Add a string representation of a BOp annotation value into a string builder.
+ * By default this is a non-recursive operation, however
+ * subclasses may override and give a recursive definition, which should respect
+ * the given indent.
+ * @param sb The destination buffer
+ * @param val The BOp to serialize
+ * @param indent An indent to use if a recursive approach is chosen.
+ */
+ protected void annotationValueToString(final StringBuilder sb, final BOp val, final int indent) {
+ sb.append(val.toString());
+ }
@Override
final public Object getRequiredProperty(final String name) {
@@ -441,6 +491,26 @@
}
+ /**
+ * The contract of this method at this level is under-specified.
+ * Sub-classes may choose between:
+ *
+ * - return a string representation of the object, similar to the use of {@link #toString()}
+ *
+ * Or:
+ *
+ * - return a pretty-print representation of the object with indent
+ *
+ * Note that the former contract may or may not include recursive descent through a tree-like
+ * object, whereas the latter almost certainly does.
+ *
+ * @param indent
+ * @return
+ */
+ public String toString(int indent) {
+ return toString();
+ }
+
private static final transient String ws = " ";
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/IValueExpression.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/IValueExpression.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/IValueExpression.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -26,5 +26,11 @@
* <code>null</code>.
*/
E get(IBindingSet bindingSet);
+ /**
+ * A string representation of a recursive structure with pretty-print indent.
+ * @param indent
+ * @return
+ */
+ String toString(int indent);
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FilterNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FilterNode.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FilterNode.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -87,7 +87,7 @@
sb.append("\n");
sb.append(indent(indent));
- sb.append("FILTER( ").append(getValueExpressionNode()).append(" )");
+ sb.append("FILTER( ").append(getValueExpressionNode().toString(indent+1)).append(" )");
// if (getQueryHints() != null) {
// sb.append("\n");
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionNode.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionNode.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -2,6 +2,7 @@
import java.io.Serializable;
import java.util.Collections;
+import java.util.Iterator;
import java.util.Map;
import org.openrdf.model.URI;
@@ -9,6 +10,7 @@
import com.bigdata.bop.BOp;
import com.bigdata.bop.IValueExpression;
import com.bigdata.bop.NV;
+import com.bigdata.bop.BOp.Annotations;
/**
* AST node for anything which is neither a constant nor a variable, including
@@ -295,4 +297,35 @@
}
+
+ /**
+ * Provides a pretty print representation with recursive descent.
+ */
+ @Override
+ public String toString(int i) {
+
+ final StringBuilder sb = new StringBuilder();
+ sb.append(getClass().getSimpleName());
+ final Integer bopId = (Integer) getProperty(Annotations.BOP_ID);
+ if (bopId != null) {
+ sb.append("[" + bopId + "]");
+ }
+ sb.append("(");
+ int nwritten = 0;
+ final Iterator<BOp> itr = argIterator();
+ while(itr.hasNext()) {
+ final BOp t = itr.next();
+ if (nwritten > 0)
+ sb.append(',');
+ if (t == null) {
+ sb.append("<null>");
+ } else {
+ sb.append(((IValueExpressionNode)t).toString(i+1));
+ }
+ nwritten++;
+ }
+ sb.append(")");
+ annotationsToString(sb, i);
+ return sb.toString();
+ }
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IQueryNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IQueryNode.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IQueryNode.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -37,8 +37,11 @@
}
+
/**
- * Pretty print with an indent.
+ * A string representation of a recursive structure with pretty-print indent.
+ * @param indent
+ * @return
*/
String toString(final int indent);
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IValueExpressionNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IValueExpressionNode.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IValueExpressionNode.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -34,4 +34,11 @@
*/
void invalidate();
+ /**
+ * A string representation of a recursive structure with pretty-print indent.
+ * @param indent
+ * @return
+ */
+ String toString(final int indent);
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -574,7 +574,7 @@
if (getQueryHints() != null && !getQueryHints().isEmpty()) {
sb.append("\n");
sb.append(indent(indent + 1));
- sb.append(Annotations.QUERY_HINTS);
+ shortenName(sb, Annotations.QUERY_HINTS);
sb.append("=");
sb.append(getQueryHints().toString());
}
@@ -586,7 +586,7 @@
if (rangeCount != null) {
sb.append("\n");
sb.append(indent(indent + 1));
- sb.append(AST2BOpBase.Annotations.ESTIMATED_CARDINALITY);
+ shortenName(sb, AST2BOpBase.Annotations.ESTIMATED_CARDINALITY);
sb.append("=");
sb.append(rangeCount.toString());
}
@@ -594,7 +594,7 @@
if (keyOrder != null) {
sb.append("\n");
sb.append(indent(indent + 1));
- sb.append(AST2BOpBase.Annotations.ORIGINAL_INDEX);
+ shortenName(sb, AST2BOpBase.Annotations.ORIGINAL_INDEX);
sb.append("=");
sb.append(keyOrder.toString());
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/SubqueryFunctionNodeBase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/SubqueryFunctionNodeBase.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/SubqueryFunctionNodeBase.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -32,6 +32,7 @@
import org.openrdf.model.URI;
import com.bigdata.bop.BOp;
+import com.bigdata.bop.IValueExpression;
/**
* A special function node for modeling value expression nodes which are
@@ -119,4 +120,9 @@
}
+ @Override
+ protected void annotationValueToString(final StringBuilder sb, final BOp val, int i) {
+ sb.append(val.toString(i));
+ }
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/TermNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/TermNode.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/TermNode.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -88,5 +88,10 @@
return null;
}
+
+ @Override
+ public String toString(int i) {
+ return toShortString();
+ }
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ValueExpressionNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ValueExpressionNode.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ValueExpressionNode.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -12,7 +12,7 @@
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
* @version $Id$
*/
-public class ValueExpressionNode extends ASTBase implements
+public abstract class ValueExpressionNode extends ASTBase implements
IValueExpressionNode {
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/TestAST.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/TestAST.java 2014-05-04 21:15:18 UTC (rev 8190)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/TestAST.java 2014-05-04 21:33:58 UTC (rev 8191)
@@ -39,6 +39,7 @@
import com.bigdata.bop.Var;
import com.bigdata.bop.ap.Predicate;
import com.bigdata.journal.ITx;
+import com.bigdata.rdf.internal.IV;
import com.bigdata.rdf.internal.VTE;
import com.bigdata.rdf.internal.constraints.CompareBOp;
import com.bigdata.rdf.internal.constraints.IVValueExpression;
@@ -262,7 +263,7 @@
groupBy.addExpr(new AssignmentNode(new VarNode("s"), new VarNode("s")));
final HavingNode havingBy = new HavingNode();
- havingBy.addExpr(new ValueExpressionNode(new CompareBOp(Var.var("x"),
+ havingBy.addExpr(new LegacyTestValueExpressionNode(new CompareBOp(Var.var("x"),
Var.var("y"), CompareOp.GT)));
final OrderByNode orderBy = new OrderByNode();
@@ -331,7 +332,7 @@
}
public FilterNode filter(final int id) {
- return new FilterNode(new ValueExpressionNode(new Filter(id)));
+ return new FilterNode(new LegacyTestValueExpressionNode(new Filter(id)));
}
public Predicate pred(final int id) {
@@ -345,8 +346,23 @@
return new Filter(id);
}
-
- private static final class Filter extends XSDBooleanIVValueExpression {
+ /**
+ * @deprecated This was just for compatibility with SOp2ASTUtility. It is
+ * only used by the test suite now.
+ */
+ @Deprecated
+ private static final class LegacyTestValueExpressionNode extends ValueExpressionNode {
+ private LegacyTestValueExpressionNode(IValueExpression<? extends IV> ve) {
+ super(ve);
+ }
+
+ @Override
+ public String toString(int i) {
+ return toShortString();
+ }
+ }
+
+ private static final class Filter extends XSDBooleanIVValueExpression {
/**
*
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-05 11:59:19
|
Revision: 8194
http://sourceforge.net/p/bigdata/code/8194
Author: thompsonbry
Date: 2014-05-05 11:59:15 +0000 (Mon, 05 May 2014)
Log Message:
-----------
Published new version of bigdata-ganglia (1.0.2) with new APIs for GangliaService that are used by the GangliaLBSPolicy. The artifact has been pushed to the systap maven repository.
See #624 (HA LBS)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/build.properties
branches/BIGDATA_RELEASE_1_3_0/build.properties
branches/BIGDATA_RELEASE_1_3_0/pom.xml
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.2.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/releases/bigdata-ganglia-1.0.2.txt
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.1.jar
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.1.jar
===================================================================
(Binary files differ)
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.2.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.2.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.2.jar 2014-05-05 01:10:28 UTC (rev 8193)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.2.jar 2014-05-05 11:59:15 UTC (rev 8194)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.2.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/build.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/build.properties 2014-05-05 01:10:28 UTC (rev 8193)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/build.properties 2014-05-05 11:59:15 UTC (rev 8194)
@@ -38,7 +38,7 @@
release.dir=ant-release
# The build version.
-build.ver=1.0.1
+build.ver=1.0.2
# Set true to do a snapshot build. This changes the value of ${version} to
# include the date.
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/releases/bigdata-ganglia-1.0.2.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/releases/bigdata-ganglia-1.0.2.txt (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/releases/bigdata-ganglia-1.0.2.txt 2014-05-05 11:59:15 UTC (rev 8194)
@@ -0,0 +1,55 @@
+This library provides a pure Java embedded peer for Ganglia. The GangliaService
+both listens and reports metrics. This means that it is capable of providing
+load-balanced reporting from soft state and can even be used as a substitute for
+gmond on operating systems (such as Windows) to which gmond has not been ported.
+
+The main entry point is GangliaService. It is trivial to setup with defaults and
+you can easily register your own metrics collection classes to report out on your
+application.
+
+GangliaService service = new GangliaService("MyService");
+// Register to collect metrics.
+service.addMetricCollector(new MyMetricsCollector());
+// Join the ganglia network; Start collecting and reporting metrics.
+service.run();
+
+The following will return the default load balanced report, which contains
+exactly the same information that you would get from gstat -a. You can also use
+an alternative method signature to get a report based on your own list of metrics
+and/or have the report sorted by the metric (or even a synthetic metric) of your
+choice.
+
+IHostReport[] hostReport = service.getHostReport();
+
+Have fun!
+
+Change log:
+
+1.0.2:
+
+- Minor API additions for GangliaService.
+
+1.0.1:
+
+- Added utility class for parsing a string into an array of host addresses.
+- GangliaListener was ignoring interrupts.
+
+--------------------------
+
+ Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+----
+This product includes software developed by The Apache Software Foundation (http://www.apache.org/).
+License: http://www.apache.org/licenses/LICENSE-2.0
Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-05-05 01:10:28 UTC (rev 8193)
+++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-05-05 11:59:15 UTC (rev 8194)
@@ -69,7 +69,7 @@
fastutil.version=5.1.5
dsiutils.version=1.0.6-020610
lgplutils.version=1.0.7-270114
-ganglia-version=1.0.1
+ganglia-version=1.0.2
gas-version=0.1.0
jackson-version=2.2.3
Modified: branches/BIGDATA_RELEASE_1_3_0/pom.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-05-05 01:10:28 UTC (rev 8193)
+++ branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-05-05 11:59:15 UTC (rev 8194)
@@ -97,7 +97,7 @@
<fastutil.version>5.1.5</fastutil.version>
<dsiutils.version>1.0.6-020610</dsiutils.version>
<lgplutils.version>1.0.7-270114</lgplutils.version>
- <bigdata.ganglia.version>1.0.1</bigdata.ganglia.version>
+ <bigdata.ganglia.version>1.0.2</bigdata.ganglia.version>
<jackson.version>2.2.3</jackson.version>
</properties>
<!-- TODO Can we declare the versions of the dependencies here as
@@ -364,11 +364,11 @@
mvn deploy:deploy-file \
-DgroupId=com.bigdata \
-DartifactId=bigdata-ganglia \
- -Dversion=1.0.1 \
+ -Dversion=1.0.2 \
-Dpackaging=jar \
-DrepositoryId=bigdata.releases \
-Durl=scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/releases/ \
- -Dfile=bigdata/lib/bigdata-ganglia-1.0.1.jar
+ -Dfile=bigdata/lib/bigdata-ganglia-1.0.2.jar
-->
<groupId>com.bigdata</groupId>
<artifactId>bigdata-ganglia</artifactId>
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <mrp...@us...> - 2014-05-05 15:08:40
|
Revision: 8196
http://sourceforge.net/p/bigdata/code/8196
Author: mrpersonick
Date: 2014-05-05 15:08:22 +0000 (Mon, 05 May 2014)
Log Message:
-----------
got rid of IDoNotJoinService
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IDoNotJoinService.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java 2014-05-05 12:29:04 UTC (rev 8195)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java 2014-05-05 15:08:22 UTC (rev 8196)
@@ -58,7 +58,6 @@
import com.bigdata.rdf.model.BigdataURI;
import com.bigdata.rdf.sparql.ast.service.BigdataServiceCall;
import com.bigdata.rdf.sparql.ast.service.ExternalServiceCall;
-import com.bigdata.rdf.sparql.ast.service.IDoNotJoinService;
import com.bigdata.rdf.sparql.ast.service.RemoteServiceCall;
import com.bigdata.rdf.sparql.ast.service.ServiceCall;
import com.bigdata.rdf.sparql.ast.service.ServiceCallUtility;
@@ -586,52 +585,6 @@
: new UnsyncLocalOutputBuffer<IBindingSet>(
op.getChunkCapacity(), sink2);
- if (serviceCall instanceof IDoNotJoinService) {
-
- // The iterator draining the subquery
- ICloseableIterator<IBindingSet[]> serviceSolutionItr = null;
- try {
-
- /*
- * Invoke the service.
- *
- * Note: Returns [null] IFF SILENT and SERVICE ERROR.
- */
-
- serviceSolutionItr = doServiceCall(serviceCall, chunk);
-
- if (serviceSolutionItr != null) {
-
- while (serviceSolutionItr.hasNext()) {
-
- final IBindingSet[] bsets =
- serviceSolutionItr.next();
-
- for (IBindingSet bs : bsets) {
-
- unsyncBuffer.add(bs);
-
- }
-
- }
-
- }
-
- } finally {
-
- // ensure the service call iterator is closed.
- if (serviceSolutionItr != null)
- serviceSolutionItr.close();
-
- }
-
- unsyncBuffer.flush();
-
- // done.
- return null;
-
- }
-
final JVMHashJoinUtility state = new JVMHashJoinUtility(op,
silent ? JoinTypeEnum.Optional : JoinTypeEnum.Normal
);
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IDoNotJoinService.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IDoNotJoinService.java 2014-05-05 12:29:04 UTC (rev 8195)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IDoNotJoinService.java 2014-05-05 15:08:22 UTC (rev 8196)
@@ -1,35 +0,0 @@
-/**
-
-Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.rdf.sparql.ast.service;
-
-/**
- * Service calls can implement this interface and they will not be routed
- * through a hash join in the query plan. They will be responsible for their
- * own join internally.
- *
- * @author mikepersonick
- */
-public interface IDoNotJoinService {
-
-}
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-05 18:58:41
|
Revision: 8201
http://sourceforge.net/p/bigdata/code/8201
Author: thompsonbry
Date: 2014-05-05 18:58:36 +0000 (Mon, 05 May 2014)
Log Message:
-----------
Refactored the package for several LBS files.
See #624 (HA LBS)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3LoadBalancerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_GangliaLBS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_NOP.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_RoundRobin.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/ServiceScore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaLBSPolicy.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/DefaultHARequestURIRewriter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHALoadBalancerPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHAPolicyLifeCycle.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHARequestURIRewriter.java
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DefaultHARequestURIRewriter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHAPolicyLifeCycle.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHARequestURIRewriter.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -104,8 +104,8 @@
import com.bigdata.rdf.sail.CreateKBTask;
import com.bigdata.rdf.sail.webapp.ConfigParams;
import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet;
-import com.bigdata.rdf.sail.webapp.IHALoadBalancerPolicy;
import com.bigdata.rdf.sail.webapp.NanoSparqlServer;
+import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
import com.bigdata.rwstore.RWStore;
import com.bigdata.service.AbstractHATransactionService;
import com.bigdata.service.jini.FakeLifeCycle;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3LoadBalancerTestCase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3LoadBalancerTestCase.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3LoadBalancerTestCase.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -37,9 +37,9 @@
import com.bigdata.journal.jini.ha.HAJournalServer.HAQuorumService;
import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest;
import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet;
-import com.bigdata.rdf.sail.webapp.IHALoadBalancerPolicy;
import com.bigdata.rdf.sail.webapp.client.RemoteRepository;
import com.bigdata.rdf.sail.webapp.client.RemoteRepository.RemoveOp;
+import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
/**
* Test suite for the HA load balancer.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -102,7 +102,7 @@
import com.bigdata.rdf.sail.BigdataSailRepository;
import com.bigdata.rdf.sail.BigdataSailRepositoryConnection;
import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet;
-import com.bigdata.rdf.sail.webapp.IHALoadBalancerPolicy;
+import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
import com.bigdata.rdf.store.AbstractTripleStore;
import com.bigdata.service.jini.RemoteDestroyAdmin;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_GangliaLBS.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_GangliaLBS.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_GangliaLBS.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -23,7 +23,7 @@
*/
package com.bigdata.journal.jini.ha;
-import com.bigdata.rdf.sail.webapp.IHALoadBalancerPolicy;
+import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
import com.bigdata.rdf.sail.webapp.lbs.policy.ganglia.GangliaLBSPolicy;
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_NOP.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_NOP.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_NOP.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -23,7 +23,7 @@
*/
package com.bigdata.journal.jini.ha;
-import com.bigdata.rdf.sail.webapp.IHALoadBalancerPolicy;
+import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
import com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy;
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_RoundRobin.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_RoundRobin.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_RoundRobin.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -23,7 +23,7 @@
*/
package com.bigdata.journal.jini.ha;
-import com.bigdata.rdf.sail.webapp.IHALoadBalancerPolicy;
+import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
import com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -44,6 +44,7 @@
import com.bigdata.journal.IIndexManager;
import com.bigdata.quorum.AbstractQuorum;
import com.bigdata.rdf.sail.webapp.client.IMimeTypes;
+import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
/**
* Useful glue for implementing service actions, but does not directly implement
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DefaultHARequestURIRewriter.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DefaultHARequestURIRewriter.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DefaultHARequestURIRewriter.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -1,85 +0,0 @@
-/**
-Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.rdf.sail.webapp;
-
-import javax.servlet.ServletConfig;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServletRequest;
-
-import com.bigdata.journal.IIndexManager;
-
-/**
- * Default implementation.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- */
-public class DefaultHARequestURIRewriter implements IHARequestURIRewriter {
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation is a NOP.
- */
- @Override
- public void init(ServletConfig servletConfig, IIndexManager indexManager)
- throws ServletException {
-
- }
-
- /**
- * {@inheritDoc}
- * <p>
- * This implementation is a NOP.
- */
- @Override
- public void destroy() {
-
- }
-
- @Override
- public StringBuilder rewriteURI(final boolean isLeaderRequest,
- final String full_prefix, final String originalRequestURL,
- final String proxyToRequestURL, final HttpServletRequest request) {
-
- final StringBuilder uri = new StringBuilder(proxyToRequestURL);
-
- if (proxyToRequestURL.endsWith("/"))
- uri.setLength(uri.length() - 1);
-
- final String rest = originalRequestURL.substring(full_prefix.length());
-
- if (!rest.startsWith("/"))
- uri.append("/");
-
- uri.append(rest);
-
- final String query = request.getQueryString();
-
- if (query != null)
- uri.append("?").append(query);
-
- return uri;
-
- }
-
-}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -42,6 +42,10 @@
import com.bigdata.journal.IIndexManager;
import com.bigdata.journal.PlatformStatsPlugIn;
import com.bigdata.journal.jini.ha.HAJournal;
+import com.bigdata.rdf.sail.webapp.lbs.DefaultHARequestURIRewriter;
+import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
+import com.bigdata.rdf.sail.webapp.lbs.IHAPolicyLifeCycle;
+import com.bigdata.rdf.sail.webapp.lbs.IHARequestURIRewriter;
import com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy;
import com.bigdata.util.InnerCause;
import com.bigdata.util.StackInfoReport;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -64,6 +64,7 @@
import com.bigdata.quorum.zk.ZKQuorumClient;
import com.bigdata.quorum.zk.ZKQuorumImpl;
import com.bigdata.rdf.sail.webapp.StatusServlet.DigestEnum;
+import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
import com.bigdata.zookeeper.DumpZookeeper;
/**
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -1,86 +0,0 @@
-/**
-Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.rdf.sail.webapp;
-
-import java.io.IOException;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-/**
- * Load balancer policy interface.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- *
- * @see HALoadBalancerServlet
- * @see <a href="http://trac.bigdata.com/ticket/624">HA Load Balancer</a>
- */
-public interface IHALoadBalancerPolicy extends IHAPolicyLifeCycle {
-
- /**
- * Invoked for each request. If the response is not committed, then it will
- * be handled by the {@link HALoadBalancerServlet}.
- *
- * @param isLeaderRequest
- * <code>true</code> iff this request must be directed to the
- * leaeder and <code>false</code> iff this request may be load
- * balanced over the joined services. UPDATEs MUST be handled by
- * the leader. Read requests can be handled by any service that
- * is joined with the met quorum.
- * @param request
- * The request.
- * @param response
- * The response.
- *
- * @return <code>true</code> iff the request was handled.
- */
- boolean service(final boolean isLeaderRequest,
- final HttpServletRequest request, final HttpServletResponse response)
- throws ServletException, IOException;
-
- /**
- * Return the URL to which a non-idempotent request will be proxied.
- *
- * @param req
- * The request.
- *
- * @return The proxyTo URL -or- <code>null</code> if we could not find a
- * service to which we could proxy this request.
- */
- String getLeaderURL(HttpServletRequest req);
-
- /**
- * Return the URL to which a <strong>read-only</strong> request will be
- * proxied. The returned URL must include the protocol, hostname and port
- * (if a non-default port will be used) as well as the target request path.
- *
- * @param req
- * The request.
- *
- * @return The proxyTo URL -or- <code>null</code> if we could not find a
- * service to which we could proxy this request.
- */
- String getReaderURL(HttpServletRequest req);
-
-}
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHAPolicyLifeCycle.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHAPolicyLifeCycle.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHAPolicyLifeCycle.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -1,47 +0,0 @@
-/**
-Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.rdf.sail.webapp;
-
-import javax.servlet.ServletConfig;
-import javax.servlet.ServletException;
-
-import com.bigdata.journal.IIndexManager;
-
-public interface IHAPolicyLifeCycle {
-
- /**
- * Initialize the policy.
- *
- * @param servletConfig
- * @param indexManager
- */
- void init(ServletConfig servletConfig, IIndexManager indexManager)
- throws ServletException;
-
- /**
- * Destroy the policy (stop any asynchronous processing, release any
- * resources).
- */
- void destroy();
-
-}
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHARequestURIRewriter.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHARequestURIRewriter.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHARequestURIRewriter.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -1,95 +0,0 @@
-/**
-Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.rdf.sail.webapp;
-
-import javax.servlet.http.HttpServletRequest;
-
-import com.bigdata.rdf.sail.webapp.lbs.ServiceScore;
-
-/**
- * Interface for rewriting the Request-URI once the load balancer has determined
- * the target host and service to which the request will be proxied.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- */
-public interface IHARequestURIRewriter extends IHAPolicyLifeCycle {
-
- /**
- * Rewrite the <code>originalRequestURI</code> into a <a href=
- * "http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2"
- * >Request-URL</a> for the web application whose servlet context root is
- * given by the <code>proxyToRequestURI</code>.
- * <p>
- * Note: The <code>proxyToRequestURI</code> is include the protocol, host,
- * port, and servlet context path for the target service. It DOES NOT
- * include any information from the original request. The purpose of this
- * method is to modify the <code>originalRequestURI</code> in order to
- * obtain a fully qualified RequestURI for the service to which the request
- * will be proxied. For example:
- *
- * <pre>
- * full_prefix: /bigdata/LBS/leader
- * -or- full_prefix: /bigdata/LBS/read
- * originalRequestURI: http://ha1.example.com:8090/bigdata/LBS/read/sparql
- * proxyToRequestURI: http://ha3.example.com:8090/bigdata/
- * return: http://ha2.example.com:8090/bigdata/LBS/read/sparql
- * </pre>
- * <p>
- * Note: this method is only invoked if we will proxy to another service.
- * Therefore, the <code>proxyToRequestURI</code> is never <code>null</code>.
- *
- * @param isLeaderRequest
- * <code>true</code> iff the request is directed to the leader.
- * @param full_prefix
- * The path prefix in the <code>originalRequestURI</code> that
- * which corresponds to the {@link HALoadBalancerServlet} and
- * which must be removed if the request is to be forwarded to a
- * local service.
- * @param originalRequestURI
- * The original <a href=
- * "http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2"
- * >Request-URL</a> from the <a
- * href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1"
- * >HTTP Request-Line</a>
- * @param proxyToRequestURI
- * The RequestURI for the root of the web application for the
- * target service and never <code>null</code>.
- * @param request
- * The original request.
- *
- * @return The fully qualified <a href=
- * "http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2"
- * >Request-URL</a> that will be used to proxy the http request to
- * the service identified by the <code>proxyToRequestURI</code>
- *
- * @see ServiceScore#getRequestURI()
- */
- public StringBuilder rewriteURI(//
- boolean isLeaderRequest,
- String full_prefix,//
- String originalRequestURI, //
- String proxyToRequestURI,//
- HttpServletRequest request//
- );
-
-}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractLBSPolicy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractLBSPolicy.java 2014-05-05 17:17:10 UTC (rev 8200)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractLBSPolicy.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -51,7 +51,6 @@
import com.bigdata.quorum.QuorumListener;
import com.bigdata.rdf.sail.webapp.BigdataServlet;
import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet;
-import com.bigdata.rdf.sail.webapp.IHALoadBalancerPolicy;
/**
* Abstract base class establishes a listener for quorum events, tracks the
Copied: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/DefaultHARequestURIRewriter.java (from rev 8166, branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DefaultHARequestURIRewriter.java)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/DefaultHARequestURIRewriter.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/DefaultHARequestURIRewriter.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -0,0 +1,85 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.rdf.sail.webapp.lbs;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+
+import com.bigdata.journal.IIndexManager;
+
+/**
+ * Default implementation.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class DefaultHARequestURIRewriter implements IHARequestURIRewriter {
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation is a NOP.
+ */
+ @Override
+ public void init(ServletConfig servletConfig, IIndexManager indexManager)
+ throws ServletException {
+
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This implementation is a NOP.
+ */
+ @Override
+ public void destroy() {
+
+ }
+
+ @Override
+ public StringBuilder rewriteURI(final boolean isLeaderRequest,
+ final String full_prefix, final String originalRequestURL,
+ final String proxyToRequestURL, final HttpServletRequest request) {
+
+ final StringBuilder uri = new StringBuilder(proxyToRequestURL);
+
+ if (proxyToRequestURL.endsWith("/"))
+ uri.setLength(uri.length() - 1);
+
+ final String rest = originalRequestURL.substring(full_prefix.length());
+
+ if (!rest.startsWith("/"))
+ uri.append("/");
+
+ uri.append(rest);
+
+ final String query = request.getQueryString();
+
+ if (query != null)
+ uri.append("?").append(query);
+
+ return uri;
+
+ }
+
+}
Copied: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHALoadBalancerPolicy.java (from rev 8166, branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHALoadBalancerPolicy.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHALoadBalancerPolicy.java 2014-05-05 18:58:36 UTC (rev 8201)
@@ -0,0 +1,88 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.rdf.sail.webapp.lbs;
+
+import java.io.IOException;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet;
+
+/**
+ * Load balancer policy interface.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ *
+ * @see HALoadBalancerServlet
+ * @see <a href="http://trac.bigdata.com/ticket/624">HA Load Balancer</a>
+ */
+public interface IHALoadBalancerPolicy extends IHAPolicyLifeCycle {
+
+ /**
+ * Invoked for each request. If the response is not committed, then it will
+ * be handled by the {@link HALoadBalancerServlet}.
+ *
+ * @param isLeaderRequest
+ * <code>true</code> iff this request must be directed to the
+ * leader and <code>false</code> iff this request may be load
+ * balanced over the joined services. UPDATEs MUST be handled by
+ * the leader. Read requests can be handled by any service that
+ * is joined with the met quorum.
+ * @param request
+ * The request.
+ * @param response
+ * The response.
+ *
+ * @return <code>true</code> iff the request was handled.
+ */
+ boolean service(final boolean isLeaderRequest,
+ final HttpServletRequest request, final HttpServletResponse response)
+ throws ServletException, IOException;
+
+ /**
+ * Return the URL to which a non-idempotent request will be proxied.
+ *
+...
[truncated message content] |
|
From: <tho...@us...> - 2014-05-06 10:16:00
|
Revision: 8205
http://sourceforge.net/p/bigdata/code/8205
Author: thompsonbry
Date: 2014-05-06 10:15:56 +0000 (Tue, 06 May 2014)
Log Message:
-----------
Added an RMI_PORT for the exporter for the HAGlue interface. This can be set from the startHAServices script.
The ganglia policy needs to choose a target host in inverse proportion to the workload on that host.
See #624 (HA LBS)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-05-06 10:15:56 UTC (rev 8205)
@@ -60,6 +60,9 @@
private static fedname = "benchmark";
+ // The RMI port for the HAGlue interface (may be ZERO for a random port).
+ private static rmiPort = 9080;
+
// write replication pipeline port (listener).
private static haPort = 9090;
@@ -250,6 +253,9 @@
replicationFactor = bigdata.replicationFactor;
+ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(bigdata.rmiPort),
+ new BasicILFactory());
+
}
/*
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-05-06 10:15:56 UTC (rev 8205)
@@ -60,9 +60,9 @@
private static fedname = "benchmark";
- // NanoSparqlServer (http) port.
- private static nssPort = ConfigMath.add(8090,1);
-
+ // The RMI port for the HAGlue interface (may be ZERO for a random port).
+ private static rmiPort = ConfigMath.add(9080,1);
+
// write replication pipeline port (listener).
private static haPort = ConfigMath.add(9090,1);
@@ -252,6 +252,9 @@
replicationFactor = bigdata.replicationFactor;
+ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(bigdata.rmiPort),
+ new BasicILFactory());
+
}
/*
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-05-06 10:15:56 UTC (rev 8205)
@@ -60,6 +60,9 @@
private static fedname = "benchmark";
+ // The RMI port for the HAGlue interface (may be ZERO for a random port).
+ private static rmiPort = ConfigMath.add(9080,1);
+
// write replication pipeline port (listener).
private static haPort = ConfigMath.add(9090,2);
@@ -249,6 +252,9 @@
replicationFactor = bigdata.replicationFactor;
+ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(bigdata.rmiPort),
+ new BasicILFactory());
+
}
/*
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-05-06 10:15:56 UTC (rev 8205)
@@ -60,6 +60,9 @@
private static fedname = "benchmark";
+ // The RMI port for the HAGlue interface (may be ZERO for a random port).
+ private static rmiPort = 9080;
+
// write replication pipeline port (listener).
private static haPort = 9090;
@@ -257,6 +260,9 @@
replicationFactor = bigdata.replicationFactor;
+ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(bigdata.rmiPort),
+ new BasicILFactory());
+
// Use the overridden version of the HAJournal by default so we get the
// HAGlueTest API for every test.
HAJournalClass = "com.bigdata.journal.jini.ha.HAJournalTest";
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2014-05-06 10:15:56 UTC (rev 8205)
@@ -60,6 +60,9 @@
private static fedname = "benchmark";
+ // The RMI port for the HAGlue interface (may be ZERO for a random port).
+ private static rmiPort = ConfigMath.add(9080,1);
+
// write replication pipeline port (listener).
private static haPort = ConfigMath.add(9090,1);
@@ -256,6 +259,9 @@
replicationFactor = bigdata.replicationFactor;
+ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(bigdata.rmiPort),
+ new BasicILFactory());
+
// Use the overridden version of the HAJournal by default so we get the
// HAGlueTest API for every test.
HAJournalClass = "com.bigdata.journal.jini.ha.HAJournalTest";
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-05-06 10:15:56 UTC (rev 8205)
@@ -60,6 +60,9 @@
private static fedname = "benchmark";
+ // The RMI port for the HAGlue interface (may be ZERO for a random port).
+ private static rmiPort = ConfigMath.add(9080,2);
+
// write replication pipeline port (listener).
private static haPort = ConfigMath.add(9090,2);
@@ -256,6 +259,9 @@
replicationFactor = bigdata.replicationFactor;
+ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(bigdata.rmiPort),
+ new BasicILFactory());
+
// Use the overridden version of the HAJournal by default so we get the
// HAGlueTest API for every test.
HAJournalClass = "com.bigdata.journal.jini.ha.HAJournalTest";
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config 2014-05-06 10:15:56 UTC (rev 8205)
@@ -60,9 +60,9 @@
private static fedname = "benchmark";
- // NanoSparqlServer (http) port.
- private static nssPort = ConfigMath.add(8090,3);
-
+ // The RMI port for the HAGlue interface (may be ZERO for a random port).
+ private static rmiPort = ConfigMath.add(9080,3);
+
// write replication pipeline port (listener).
private static haPort = ConfigMath.add(9090,3);
@@ -259,6 +259,9 @@
replicationFactor = bigdata.replicationFactor;
+ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(bigdata.rmiPort),
+ new BasicILFactory());
+
// Use the overridden version of the HAJournal by default so we get the
// HAGlueTest API for every test.
HAJournalClass = "com.bigdata.journal.jini.ha.HAJournalTest";
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config 2014-05-06 10:15:56 UTC (rev 8205)
@@ -60,9 +60,9 @@
private static fedname = "benchmark";
- // NanoSparqlServer (http) port.
- private static nssPort = ConfigMath.add(8090,4);
-
+ // The RMI port for the HAGlue interface (may be ZERO for a random port).
+ private static rmiPort = ConfigMath.add(9080,4);
+
// write replication pipeline port (listener).
private static haPort = ConfigMath.add(9090,4);
@@ -259,6 +259,9 @@
replicationFactor = bigdata.replicationFactor;
+ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(bigdata.rmiPort),
+ new BasicILFactory());
+
// Use the overridden version of the HAJournal by default so we get the
// HAGlueTest API for every test.
HAJournalClass = "com.bigdata.journal.jini.ha.HAJournalTest";
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaLBSPolicy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaLBSPolicy.java 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaLBSPolicy.java 2014-05-06 10:15:56 UTC (rev 8205)
@@ -717,7 +717,12 @@
* Stochastically select the target host based on the current host
* workload.
*
- * We need to ignore any host that is not joined with the met quorum....
+ * Note: We need to ignore any host that is not joined with the met
+ * quorum....
+ *
+ * Note: The host is selected with a probability that is INVERSELY
+ * proportional to normalized host load. If the normalized host load is
+ * .75, then the host is selected with a probability of .25.
*/
final double d = rand.nextDouble();
@@ -726,13 +731,13 @@
final List<ServiceScore> foundServices = new LinkedList<ServiceScore>();
for (int i = 0; i < hostScores.length; i++) {
hostScore = hostScores[i];
- sum += hostScore.score;
+ sum += 1d - hostScore.score; // Note: Choice is inversely proportional to normalized workload!
if (hostScore.hostname == null) // can't use w/o hostname.
continue;
if (d > sum) // scan further.
continue;
/*
- * We found a host having a position in the cumultive ordering of
+ * We found a host having a position in the cumulative ordering of
* the normalized host workloads that is GTE to the random number.
* Now we need to find one (or more) service(s) on that host.
*
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2014-05-06 10:15:56 UTC (rev 8205)
@@ -62,6 +62,9 @@
*/
private static fedname = ConfigMath.getProperty("FEDNAME","benchmark");
+ // The RMI port for the HAGlue interface (may be ZERO for a random port).
+ private static rmiPort = Integer.parseInt(ConfigMath.getProperty("RMI_PORT","9080"));;
+
// write replication pipeline port (listener).
private static haPort = Integer.parseInt(ConfigMath.getProperty("HA_PORT","9090"));
@@ -304,6 +307,9 @@
replicationFactor = bigdata.replicationFactor;
+ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(bigdata.rmiPort),
+ new BasicILFactory());
+
haLogDir = bigdata.haLogDir;
snapshotDir = bigdata.snapshotDir;
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-05-06 10:15:56 UTC (rev 8205)
@@ -80,6 +80,7 @@
-DGROUPS=${GROUPS}\
-DLOCATORS=${LOCATORS}\
-DZK_SERVERS=${ZK_SERVERS}\
+ -DRMI_PORT=${RMI_PORT}\
-DHA_PORT=${HA_PORT}\
"-Dcom.bigdata.hostname=${BIGDATA_HOSTNAME}"\
"-Djetty.port=${JETTY_PORT}"\
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA 2014-05-06 00:30:25 UTC (rev 8204)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA 2014-05-06 10:15:56 UTC (rev 8205)
@@ -38,6 +38,7 @@
# All of these have defaults. Override as necessary.
#export REPLICATION_FACTOR=3
+#export RMI_PORT=9080
#export HA_PORT=9090
#export JETTY_PORT=8080
#export JETTY_XML=var/jetty/WEB-INF/jetty.xml
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-06 13:54:45
|
Revision: 8207
http://sourceforge.net/p/bigdata/code/8207
Author: thompsonbry
Date: 2014-05-06 13:54:41 +0000 (Tue, 06 May 2014)
Log Message:
-----------
Added the jetty-jmx and jetty-jndi dependencies. The jmx dependency is necessary if you want to export the MBeans from the jetty server to another host. The jndi dependency is just forward looking - jndi provides a flexible mechanism for configuring jetty.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/.classpath
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
branches/BIGDATA_RELEASE_1_3_0/build.xml
branches/BIGDATA_RELEASE_1_3_0/pom.xml
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jmx-9.1.4.v20140401.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jndi-9.1.4.v20140401.jar
Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-06 10:49:40 UTC (rev 8206)
+++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-06 13:54:41 UTC (rev 8207)
@@ -58,6 +58,8 @@
<classpathentry exported="true" kind="lib" path="bigdata/lib/unimi/fastutil-5.1.5.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-analyzers-3.0.0.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-core-3.0.0.jar"/>
+ <classpathentry kind="lib" path="bigdata/lib/jetty/jetty-jmx-9.1.4.v20140401.jar"/>
+ <classpathentry kind="lib" path="bigdata/lib/jetty/jetty-jndi-9.1.4.v20140401.jar"/>
<classpathentry exported="true" kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/high-scale-lib-v1.1.2.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/junit-ext-1.1-b3-dev.jar"/>
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jmx-9.1.4.v20140401.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jmx-9.1.4.v20140401.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jmx-9.1.4.v20140401.jar 2014-05-06 10:49:40 UTC (rev 8206)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jmx-9.1.4.v20140401.jar 2014-05-06 13:54:41 UTC (rev 8207)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jmx-9.1.4.v20140401.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jndi-9.1.4.v20140401.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jndi-9.1.4.v20140401.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jndi-9.1.4.v20140401.jar 2014-05-06 10:49:40 UTC (rev 8206)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jndi-9.1.4.v20140401.jar 2014-05-06 13:54:41 UTC (rev 8207)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/jetty-jndi-9.1.4.v20140401.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-06 10:49:40 UTC (rev 8206)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-06 13:54:41 UTC (rev 8207)
@@ -31,6 +31,46 @@
<Set name="detailedDump">false</Set>
</Get>
+ <!-- =========================================================== -->
+ <!-- Get the platform mbean server -->
+ <!-- =========================================================== -->
+ <Call id="MBeanServer" class="java.lang.management.ManagementFactory"
+ name="getPlatformMBeanServer" />
+
+ <!-- =========================================================== -->
+ <!-- Initialize the Jetty MBean container -->
+ <!-- =========================================================== -->
+ <Call name="addBean">
+ <Arg>
+ <New id="MBeanContainer" class="org.eclipse.jetty.jmx.MBeanContainer">
+ <Arg>
+ <Ref refid="MBeanServer" />
+ </Arg>
+ </New>
+ </Arg>
+ </Call>
+
+ <!-- Add the static log to the MBean server. -->
+ <Call name="addBean">
+ <Arg>
+ <New class="org.eclipse.jetty.util.log.Log" />
+ </Arg>
+ </Call>
+
+ <!-- For remote MBean access (optional) -->
+ <New id="ConnectorServer" class="org.eclipse.jetty.jmx.ConnectorServer">
+ <Arg>
+ <New class="javax.management.remote.JMXServiceURL">
+ <Arg type="java.lang.String">rmi</Arg>
+ <Arg type="java.lang.String" />
+ <Arg type="java.lang.Integer"><SystemProperty name="jetty.jmxrmiport" default="1090"/></Arg>
+ <Arg type="java.lang.String">/jndi/rmi://<SystemProperty name="jetty.jmxrmihost" default="localhost"/>:<SystemProperty name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg>
+ </New>
+ </Arg>
+ <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg>
+ <Call name="start" />
+ </New>
+
<!-- =========================================================== -->
<!-- Http Configuration. -->
<!-- This is a common configuration instance used by all -->
@@ -88,7 +128,7 @@
<Item>
<!-- This is the bigdata web application. -->
<New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext">
- <Set name="resourceBase">
+ <Set name="war">
<!-- The location of the top-level of the bigdata webapp. -->
<SystemProperty name="jetty.resourceBase" default="bigdata-war/src" />
</Set>
@@ -98,30 +138,11 @@
<Set name="extractWAR">false</Set>
</New>
</Item>
- <Item>
- <!-- This appears to be necessary in addition to the above. -->
- <!-- Without this, it will not resolve http://localhost:8080/ -->
- <!-- and can fail to deliver some of the static content. -->
- <New id="ResourceHandler" class="org.eclipse.jetty.server.handler.ResourceHandler">
- <Set name="resourceBase">
- <!-- The location of the top-level of the bigdata webapp. -->
- <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" />
- </Set>
- <Set name="welcomeFiles">
- <Array type="java.lang.String">
- <Item>html/index.html</Item>
- </Array>
- </Set>
- </New>
- </Item>
- <!-- <Item>
- <New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler"></New>
- </Item> -->
</Array>
</Set>
</New>
</Set>
-
+
<!-- =========================================================== -->
<!-- extra server options -->
<!-- =========================================================== -->
Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-06 10:49:40 UTC (rev 8206)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-06 13:54:41 UTC (rev 8207)
@@ -980,8 +980,12 @@
tofile="${dist.lib}/jetty-continuation.jar" />
<copy file="${bigdata-jetty.lib}/jetty-http-${jetty.version}.jar"
tofile="${dist.lib}/jetty-http.jar" />
- <copy file="${bigdata-jetty.lib}/jetty-io-${jetty.version}.jar"
- tofile="${dist.lib}/jetty-io.jar" />
+ <copy file="${bigdata-jetty.lib}/jetty-io-${jetty.version}.jar"
+ tofile="${dist.lib}/jetty-io.jar" />
+ <copy file="${bigdata-jetty.lib}/jetty-jmx-${jetty.version}.jar"
+ tofile="${dist.lib}/jetty-jmx.jar" />
+ <copy file="${bigdata-jetty.lib}/jetty-jndi-${jetty.version}.jar"
+ tofile="${dist.lib}/jetty-jndi.jar" />
<copy file="${bigdata-jetty.lib}/jetty-server-${jetty.version}.jar"
tofile="${dist.lib}/jetty-server.jar" />
<copy file="${bigdata-jetty.lib}/jetty-util-${jetty.version}.jar"
@@ -1208,10 +1212,10 @@
todir="${dist.dir}/doc" />
<!-- Stage documentation from the wiki. -->
- <get dest="${dist.doc}/HAJournalServer.html"
+ <get dest="${dist.doc}/HAJournalServer.html" ignoreerrors="true"
src="http://wiki.bigdata.com/wiki/index.php/HAJournalServer?printable=yes"
/>
- <get dest="${dist.doc}/NanoSparqlServer.html"
+ <get dest="${dist.doc}/NanoSparqlServer.html" ignoreerrors="true"
src="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer?printable=yes"
/>
@@ -1752,7 +1756,7 @@
<!-- TODO ${path.separator}${dist.lib}/bigdata-gas.jar -->
<property name="javac.test.classpath"
- value="${classes.dir}${path.separator}${junit.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/classserver.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-client.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar" />
+ value="${classes.dir}${path.separator}${junit.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/classserver.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-jmx.jar${path.separator}${dist.lib}/jetty-jndi.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-client.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar" />
<echo>javac
</echo>
@@ -2112,6 +2116,8 @@
<pathelement location="${dist.lib}/jetty-continuation.jar" />
<pathelement location="${dist.lib}/jetty-http.jar" />
<pathelement location="${dist.lib}/jetty-io.jar" />
+ <pathelement location="${dist.lib}/jetty-jmx.jar" />
+ <pathelement location="${dist.lib}/jetty-jndi.jar" />
<pathelement location="${dist.lib}/jetty-server.jar" />
<pathelement location="${dist.lib}/jetty-util.jar" />
<pathelement location="${dist.lib}/jetty-webapp.jar" />
@@ -2133,7 +2139,7 @@
</path>
<property name="run.class.path"
- value="${junit.jar}${path.separator}${bigdata-test.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/bigdata.jar${path.separator}${dist.lib}/colt.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/bigdata-gas${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/slf4j-log4j.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-client.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.ja
r" />
+ value="${junit.jar}${path.separator}${bigdata-test.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/bigdata.jar${path.separator}${dist.lib}/colt.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/bigdata-gas${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/slf4j-log4j.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-jmx.jar${path.separator}${dist.lib}/jetty-jndi.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-client.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache
.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar" />
<echo> classpath: ${run.class.path}
</echo>
@@ -2279,6 +2285,8 @@
<sysproperty key="jetty-continuation.jar" value="${dist.lib}/jetty-continuation.jar" />
<sysproperty key="jetty-http.jar" value="${dist.lib}/jetty-http.jar" />
<sysproperty key="jetty-io.jar" value="${dist.lib}/jetty-io.jar" />
+ <sysproperty key="jetty-jmx.jar" value="${dist.lib}/jetty-jmx.jar" />
+ <sysproperty key="jetty-jndi.jar" value="${dist.lib}/jetty-jndi.jar" />
<sysproperty key="jetty-server.jar" value="${dist.lib}/jetty-server.jar" />
<sysproperty key="jetty-util.jar" value="${dist.lib}/jetty-util.jar" />
<sysproperty key="jetty-webapp.jar" value="${dist.lib}/jetty-webapp.jar" />
Modified: branches/BIGDATA_RELEASE_1_3_0/pom.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-05-06 10:49:40 UTC (rev 8206)
+++ branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-05-06 13:54:41 UTC (rev 8207)
@@ -409,6 +409,8 @@
jetty-continuation
jetty-http
jetty-io
+ jetty-jmx
+ jetty-jndi
jetty-server
jetty-util
jetty-webapp
@@ -435,6 +437,16 @@
<artifactId>jetty-io</artifactId>
<version>${jetty.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-jmx</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-jndi</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-server</artifactId>
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-07 13:09:24
|
Revision: 8212
http://sourceforge.net/p/bigdata/code/8212
Author: thompsonbry
Date: 2014-05-07 13:09:19 +0000 (Wed, 07 May 2014)
Log Message:
-----------
HAJournalServer now understands the jetty.dump.start environment variable.
Modified the HA CI test suite to pass through the jetty.dump.start environment variable if set in the environment that runs the test suite JVM.
Rolled back a change to jetty.xml that is breaking the HA CI test server startup. I will have to take this up with the jetty folks. This change was based on a recommended simplification of jetty.xml. The exception from HA CI was:
{{{
WARN : 07:59:54,422 1620 com.bigdata.journal.jini.ha.HAJournalServer org.eclipse.jetty.webapp.WebAppContext.doStart(WebAppContext.java:506): Failed startup of context o.e.j.w.WebAppContext@718acd64{/bigdata,null,null}{"."}
java.io.FileNotFoundException: "."
at org.eclipse.jetty.webapp.WebInfConfiguration.unpack(WebInfConfiguration.java:493)
at org.eclipse.jetty.webapp.WebInfConfiguration.preConfigure(WebInfConfiguration.java:72)
at org.eclipse.jetty.webapp.WebAppContext.preConfigure(WebAppContext.java:460)
at org.eclipse.jetty.webapp.WebAppContext.doStart(WebAppContext.java:496)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:68)
at org.eclipse.jetty.util.component.ContainerLifeCycle.start(ContainerLifeCycle.java:125)
at org.eclipse.jetty.util.component.ContainerLifeCycle.doStart(ContainerLifeCycle.java:107)
at org.eclipse.jetty.server.handler.AbstractHandler.doStart(AbstractHandler.java:60)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:68)
at org.eclipse.jetty.util.component.ContainerLifeCycle.start(ContainerLifeCycle.java:125)
at org.eclipse.jetty.server.Server.start(Server.java:358)
at org.eclipse.jetty.util.component.ContainerLifeCycle.doStart(ContainerLifeCycle.java:107)
at org.eclipse.jetty.server.handler.AbstractHandler.doStart(AbstractHandler.java:60)
at org.eclipse.jetty.server.Server.doStart(Server.java:325)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:68)
at com.bigdata.journal.jini.ha.HAJournalServer.startNSS(HAJournalServer.java:4550)
at com.bigdata.journal.jini.ha.HAJournalServer.startUpHook(HAJournalServer.java:883)
at com.bigdata.journal.jini.ha.AbstractServer.run(AbstractServer.java:1881)
at com.bigdata.journal.jini.ha.HAJournalServer.<init>(HAJournalServer.java:623)
at com.bigdata.journal.jini.ha.HAJournalServer.main(HAJournalServer.java:4763)
}}}
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-06 19:51:59 UTC (rev 8211)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-07 13:09:19 UTC (rev 8212)
@@ -4549,6 +4549,13 @@
// Start the server.
jettyServer.start();
+ if (Boolean.getBoolean("jetty.dump.start")) {
+
+ // Support the jetty dump-after-start semantics.
+ log.warn(jettyServer.dump());
+
+ }
+
/*
* Report *an* effective URL of this service.
*
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-06 19:51:59 UTC (rev 8211)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-07 13:09:19 UTC (rev 8212)
@@ -2137,6 +2137,13 @@
private final String serviceName;
private final UUID serviceId;
private final int jettyPort;
+ /**
+ * The value of this environment variable is passed down. You can set
+ * this environment variable to force jetty to dump its internal state
+ * after start.
+ */
+ private final boolean jettyDumpStart = Boolean
+ .getBoolean("jetty.dump.start");
// private final File serviceDir;
private final String[] args;
@@ -2230,6 +2237,12 @@
private final String JETTY_RESOURCE_BASE = "jetty.resourceBase";
/**
+ * Used to override the <code>jetty.dump.start</code> environment
+ * property.
+ */
+ private final String TEST_JETTY_DUMP_START = "jetty.dump.start";
+
+ /**
* The absolute effective path of the service directory. This is
* overridden on the {@link #TEST_SERVICE_DIR} environment variable
* and in the deployed HAJournal.config file in order to have the
@@ -2277,6 +2290,9 @@
// Override the location of the webapp as deployed.
cmds.add("-D" + JETTY_RESOURCE_BASE + "=\".\"");
+ // Override the jetty.dump.start.
+ cmds.add("-D" + TEST_JETTY_DUMP_START + "=" + jettyDumpStart);
+
super.addCommandArgs(cmds);
for (String arg : args) {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-06 19:51:59 UTC (rev 8211)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-07 13:09:19 UTC (rev 8212)
@@ -122,15 +122,14 @@
<!-- =========================================================== -->
<!-- Set handler Collection Structure -->
<!-- =========================================================== -->
+ <!-- Recommended approach: does not work for HA CI test suite.
<Set name="handler">
<New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection">
<Set name="handlers">
<Array type="org.eclipse.jetty.server.Handler">
<Item>
- <!-- This is the bigdata web application. -->
<New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext">
<Set name="war">
- <!-- The location of the top-level of the bigdata webapp. -->
<SystemProperty name="jetty.resourceBase" default="bigdata-war/src" />
</Set>
<Set name="contextPath">/bigdata</Set>
@@ -142,6 +141,46 @@
</Array>
</Set>
</New>
+ </Set> -->
+ <Set name="handler">
+ <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection">
+ <Set name="handlers">
+ <Array type="org.eclipse.jetty.server.Handler">
+ <Item>
+ <!-- This is the bigdata web application. -->
+ <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext">
+ <Set name="resourceBase">
+ <!-- The location of the top-level of the bigdata webapp. -->
+ <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" />
+ </Set>
+ <Set name="contextPath">/bigdata</Set>
+ <Set name="descriptor">WEB-INF/web.xml</Set>
+ <Set name="parentLoaderPriority">true</Set>
+ <Set name="extractWAR">false</Set>
+ </New>
+ </Item>
+ <Item>
+ <!-- This appears to be necessary in addition to the above. -->
+ <!-- Without this, it will not resolve http://localhost:8080/ -->
+ <!-- and can fail to deliver some of the static content. -->
+ <New id="ResourceHandler" class="org.eclipse.jetty.server.handler.ResourceHandler">
+ <Set name="resourceBase">
+ <!-- The location of the top-level of the bigdata webapp. -->
+ <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" />
+ </Set>
+ <Set name="welcomeFiles">
+ <Array type="java.lang.String">
+ <Item>html/index.html</Item>
+ </Array>
+ </Set>
+ </New>
+ </Item>
+ <!-- <Item>
+ <New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler"></New>
+ </Item> -->
+ </Array>
+ </Set>
+ </New>
</Set>
<!-- =========================================================== -->
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-07 14:16:35
|
Revision: 8216
http://sourceforge.net/p/bigdata/code/8216
Author: thompsonbry
Date: 2014-05-07 14:16:32 +0000 (Wed, 07 May 2014)
Log Message:
-----------
Identified a problem with the GangliaLBSPolicy where bigdata and bigdata-ganglia use the canonical (fully qualified) hostname and ganglia uses the local name of the host. This means that the host metrics are not being obtained by the GangliaLBSPolicy. While it is possible to override the hostname for ganglia starting with 3.2.x, this is quite a pain and could involve full restarts of gmond on all machines in the cluster. I have not yet resolved this issue, but I have added the ability to force the bigdata-ganglia implementation to use a hostname specified in an environment variable.
Added the ability to override the hostname for bigdata-ganglia using the com.bigdata.hostname environment variable per [1].
Updated the pom.xml and build.properties files for the bigdata-ganglia-1.0.3 release.
Published that release to our maven repo.
[1] http://trac.bigdata.com/ticket/886 (Provide workaround for bad reverse DNS setups)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/.classpath
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/build.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java
branches/BIGDATA_RELEASE_1_3_0/build.properties
branches/BIGDATA_RELEASE_1_3_0/pom.xml
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.3.jar
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.2.jar
Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-07 13:57:17 UTC (rev 8215)
+++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-07 14:16:32 UTC (rev 8216)
@@ -86,10 +86,10 @@
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-proxy-9.1.4.v20140401.jar" sourcepath="/Users/bryan/Downloads/org.eclipse.jetty.project-jetty-9.1.4.v20140401"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-rewrite-9.1.4.v20140401.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-security-9.1.4.v20140401.jar"/>
- <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-server-9.1.4.v20140401.jar"/>
+ <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-server-9.1.4.v20140401.jar" sourcepath="/Users/bryan/Downloads/org.eclipse.jetty.project-jetty-9.1.4.v20140401"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-servlet-9.1.4.v20140401.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-util-9.1.4.v20140401.jar"/>
- <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar"/>
+ <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar" sourcepath="/Users/bryan/Downloads/org.eclipse.jetty.project-jetty-9.1.4.v20140401"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata-sails/lib/jackson-core-2.2.3.jar"/>
<classpathentry kind="output" path="bin"/>
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.2.jar
===================================================================
(Binary files differ)
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.3.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.3.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.3.jar 2014-05-07 13:57:17 UTC (rev 8215)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.3.jar 2014-05-07 14:16:32 UTC (rev 8216)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.3.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2014-05-07 13:57:17 UTC (rev 8215)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2014-05-07 14:16:32 UTC (rev 8216)
@@ -27,9 +27,6 @@
package com.bigdata;
-import com.bigdata.counters.AbstractStatisticsCollector;
-import com.bigdata.jini.start.process.ProcessHelper;
-
/**
* A class for those few statics that it makes sense to reference from other
* places.
@@ -49,29 +46,31 @@
/**
* The name of an environment variable whose value will be used as the
+ * canonical host name for the host running this JVM. This information is
- * used by the {@link AbstractStatisticsCollector}, which is responsible for
- * obtaining and reporting the canonical hostname for the {@link Banner} and
- * other purposes.
+ * used by the {@link com.bigdata.counters.AbstractStatisticsCollector},
+ * which is responsible for obtaining and reporting the canonical hostname
+ * for the {@link Banner} and other purposes.
*
- * @see AbstractStatisticsCollector
- * @see Banner
+ * @see com.bigdata.counters.AbstractStatisticsCollector
+ * @see com.bigdata.Banner
+ * @see com.bigdata.ganglia.GangliaService#HOSTNAME
* @see <a href="http://trac.bigdata.com/ticket/886" >Provide workaround for
* bad reverse DNS setups</a>
*/
public static final String HOSTNAME = "com.bigdata.hostname";
-
+
/**
* The #of lines of output from a child process which will be echoed onto
* {@link System#out} when that child process is executed. This makes it
* easy to track down why a child process dies during service start. If you
* want to see all output from the child process, then you should set the
- * log level for the {@link ProcessHelper} class to INFO.
+ * log level for the {@link com.bigdata.jini.start.process.ProcessHelper}
+ * class to INFO.
* <p>
- * Note: This needs to be more than the length of the {@link Banner} output
- * in order for anything related to the process behavior to be echoed on
- * {@link System#out}.
+ * Note: This needs to be more than the length of the
+ * {@link com.bigdata.Banner} output in order for anything related to the
+ * process behavior to be echoed on {@link System#out}.
*
- * @see ProcessHelper
+ * @see com.bigdata.jini.start.process.ProcessHelper
*/
public static int echoProcessStartupLineCount = 30;//Integer.MAX_VALUE;//100
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/build.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/build.properties 2014-05-07 13:57:17 UTC (rev 8215)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/build.properties 2014-05-07 14:16:32 UTC (rev 8216)
@@ -38,7 +38,7 @@
release.dir=ant-release
# The build version.
-build.ver=1.0.2
+build.ver=1.0.3
# Set true to do a snapshot build. This changes the value of ${version} to
# include the date.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java 2014-05-07 13:57:17 UTC (rev 8215)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java 2014-05-07 14:16:32 UTC (rev 8216)
@@ -1452,27 +1452,50 @@
}
- /**
- * The name for this host.
- */
- public static final String getCanonicalHostName() {
- String s;
- try {
- /*
- * Note: This should be the host *name* NOT an IP address of a
- * preferred Ethernet adaptor.
- */
- s = InetAddress.getLocalHost().getCanonicalHostName();
- } catch (Throwable t) {
- log.warn("Could not resolve canonical name for host: " + t);
- }
- try {
- s = InetAddress.getLocalHost().getHostName();
- } catch (Throwable t) {
- log.warn("Could not resolve name for host: " + t);
- s = "localhost";
- }
- return s;
+ /**
+ * The name of an environment variable whose value will be used as the
+ * canonical host name for the host running this JVM. This information is
+ * used by the {@link GangliaService}, which is responsible for obtaining
+ * and reporting the canonical hostname for host metrics reporting.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/886" >Provide workaround for
+ * bad reverse DNS setups</a>
+ */
+ public static final String HOSTNAME = "com.bigdata.hostname";
+
+ /**
+ * The name for this host.
+ *
+ * @see #HOSTNAME
+ * @see <a href="http://trac.bigdata.com/ticket/886" >Provide workaround for
+ * bad reverse DNS setups</a>
+ */
+ public static final String getCanonicalHostName() {
+ String s = System.getProperty(HOSTNAME);
+ if (s != null) {
+ // Trim whitespace.
+ s = s.trim();
+ }
+ if (s != null && s.length() != 0) {
+ log.warn("Hostname override: hostname=" + s);
+ } else {
+ try {
+ /*
+ * Note: This should be the host *name* NOT an IP address of a
+ * preferred Ethernet adaptor.
+ */
+ s = InetAddress.getLocalHost().getCanonicalHostName();
+ } catch (Throwable t) {
+ log.warn("Could not resolve canonical name for host: " + t);
+ }
+ try {
+ s = InetAddress.getLocalHost().getHostName();
+ } catch (Throwable t) {
+ log.warn("Could not resolve name for host: " + t);
+ s = "localhost";
+ }
+ }
+ return s;
}
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-05-07 13:57:17 UTC (rev 8215)
+++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-05-07 14:16:32 UTC (rev 8216)
@@ -69,7 +69,7 @@
fastutil.version=5.1.5
dsiutils.version=1.0.6-020610
lgplutils.version=1.0.7-270114
-ganglia-version=1.0.2
+ganglia-version=1.0.3
gas-version=0.1.0
jackson-version=2.2.3
Modified: branches/BIGDATA_RELEASE_1_3_0/pom.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-05-07 13:57:17 UTC (rev 8215)
+++ branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-05-07 14:16:32 UTC (rev 8216)
@@ -97,7 +97,7 @@
<fastutil.version>5.1.5</fastutil.version>
<dsiutils.version>1.0.6-020610</dsiutils.version>
<lgplutils.version>1.0.7-270114</lgplutils.version>
- <bigdata.ganglia.version>1.0.2</bigdata.ganglia.version>
+ <bigdata.ganglia.version>1.0.3</bigdata.ganglia.version>
<jackson.version>2.2.3</jackson.version>
</properties>
<!-- TODO Can we declare the versions of the dependencies here as
@@ -364,15 +364,15 @@
mvn deploy:deploy-file \
-DgroupId=com.bigdata \
-DartifactId=bigdata-ganglia \
- -Dversion=1.0.2 \
+ -Dversion=1.0.3 \
-Dpackaging=jar \
-DrepositoryId=bigdata.releases \
-Durl=scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/releases/ \
- -Dfile=bigdata/lib/bigdata-ganglia-1.0.2.jar
+ -Dfile=bigdata/lib/bigdata-ganglia-1.0.3.jar
-->
<groupId>com.bigdata</groupId>
<artifactId>bigdata-ganglia</artifactId>
- <version>1.0.1</version>
+ <version>${bigdata.ganglia.version}</version>
<optional>true</optional>
</dependency>
<!--
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <jer...@us...> - 2014-05-07 15:39:10
|
Revision: 8220
http://sourceforge.net/p/bigdata/code/8220
Author: jeremy_carroll
Date: 2014-05-07 15:39:05 +0000 (Wed, 07 May 2014)
Log Message:
-----------
delete spurious character and ensure that the copyright symbol does not prevent the javadoc target from completing.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
branches/BIGDATA_RELEASE_1_3_0/build.xml
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-05-07 15:31:17 UTC (rev 8219)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-05-07 15:39:05 UTC (rev 8220)
@@ -79,7 +79,7 @@
* followers in a manner that reflects the CPU, IO Wait, and GC Time associated
* with each service.
* <p>
- * The {@link PlatformStatsPlugIn}\xCA and {@link GangliaPlugIn} MUST be enabled
+ * The {@link PlatformStatsPlugIn} and {@link GangliaPlugIn} MUST be enabled
* for the default load balancer policy to operate. It depends on those plugins
* to maintain a model of the load on the HA replication cluster. The
* GangliaPlugIn should be run only as a listener if you are are running the
Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-07 15:31:17 UTC (rev 8219)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-07 15:39:05 UTC (rev 8220)
@@ -394,6 +394,7 @@
overview="${bigdata.dir}/overview.html"
windowtitle="bigdata® v${build.ver}"
classpathref="build.classpath"
+ encoding="utf-8"
private="false"
>
<arg value="-J-Xmx1000m" />
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <jer...@us...> - 2014-05-07 15:39:20
|
Revision: 8221
http://sourceforge.net/p/bigdata/code/8221
Author: jeremy_carroll
Date: 2014-05-07 15:39:17 +0000 (Wed, 07 May 2014)
Log Message:
-----------
Initial version of ConfigurableAnalyzerFactory to address trac 912
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/.settings/
branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/EmptyAnalyzer.java
Added: branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs 2014-05-07 15:39:17 UTC (rev 8221)
@@ -0,0 +1,2 @@
+eclipse.preferences.version=1
+encoding//bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java=UTF-8
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java 2014-05-07 15:39:17 UTC (rev 8221)
@@ -0,0 +1,805 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on May 6, 2014 by Jeremy J. Carroll, Syapse Inc.
+ */
+package com.bigdata.search;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.lang.reflect.Constructor;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Pattern;
+
+import org.apache.log4j.Logger;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.analysis.StopAnalyzer;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.miscellaneous.PatternAnalyzer;
+import org.apache.lucene.analysis.ru.RussianAnalyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.util.Version;
+
+import com.bigdata.btree.keys.IKeyBuilder;
+import com.bigdata.btree.keys.KeyBuilder;
+
+/**
+ * This class can be used with the bigdata properties file to specify
+ * which {@link Analyzer}s are used for which languages.
+ * Languages are specified by the language tag on RDF literals, which conform
+ * with <a href="http://www.rfc-editor.org/rfc/rfc5646.txt">RFC 5646</a>.
+ * Within bigdata plain literals are assigned to the default locale's language.
+ *
+ * The bigdata properties are used to map language ranges, as specified by
+ * <a href="http://www.rfc-editor.org/rfc/rfc4647.txt">RFC 4647</a> to classes which extend {@link Analyzer}.
+ * Supported classes included all the natural language specific classes from Lucene, and also:
+ * <ul>
+ * <li>{@link PatternAnalyzer}
+ * <li>{@link KeywordAnalyzer}
+ * <li>{@link SimpleAnalyzer}
+ * <li>{@link StopAnalyzer}
+ * <li>{@link WhitespaceAnalyzer}
+ * <li>{@link StandardAnalyzer}
+ * </ul>
+ * More generally any subclass of {@link Analyzer} that has at least one constructor matching:
+ * <ul>
+ * <li>no arguments
+ * <li>{@link Version}
+ * <li>{@link Set} (of strings, the stop words)
+ * <li>{@link Version}, {@link Set}
+ * </ul>
+ * is usable. If the class has a static method named <code>getDefaultStopSet()</code> then this is assumed
+ * to do what it says on the tin; some of the Lucene analyzers store their default stop words elsewhere,
+ * and those stop words are still usable by this class. If no stop word set can be found, and the class has both a constructor without
+ * stopwords and a constructor with stopwords, then the former is assumed to use a default stop word set.
+ * <p>
+ * Configuration is by means of the bigdata properties file.
+ * All relevant properties start <code>com.bigdata.search.ConfigurableAnalyzerFactory</code> which we
+ * abbreviate to <code>c.b.s.C</code> in this documentation.
+ * Properties from {@link Options} apply to the factory.
+ * <p>
+ *
+ * If there are no such properties at all then the property {@link Options#INCLUDE_DEFAULTS} is set to true,
+ * and the behavior of this class is the same as the legacy {@link DefaultAnalyzerFactory}.
+ * <p>
+ * Other properties, from {@link AnalyzerOptions} start with
+ * <code>c.b.s.C.analyzer.<em>language-range</em></code> where <code><em>language-range</em></code> conforms
+ * with the extended language range construct from RFC 4647, section 2.2. These are used to specify
+ * an analyzer for the given language range.
+ * <p>
+ * If no analyzer is specified for the language range <code>*</code> then the {@link StandardAnalyzer} is used.
+ * <p>
+ * Given any specific language, the analyzer matching the longest configured language range,
+ * measured in number of subtags, is used; see {@link #getAnalyzer(String, boolean)}.
+ * In the event of a tie, the alphabetically first language range is used.
+ * The algorithm to find a match is "Extended Filtering" as defined in section 3.3.2 of RFC 4647.
+ * <p>
+ * Some useful analyzers are as follows:
+ * <dl>
+ * <dt>{@link KeywordAnalyzer}</dt>
+ * <dd>This treats every lexical value as a single search token</dd>
+ * <dt>{@link WhitespaceAnalyzer}</dt>
+ * <dd>This uses whitespace to tokenize</dd>
+ * <dt>{@link PatternAnalyzer}</dt>
+ * <dd>This uses a regular expression to tokenize</dd>
+ * <dt>{@link EmptyAnalyzer}</dt>
+ * <dd>This suppresses the functionality, by treating every expression as a stop word.</dd>
+ * </dl>
+ * there are in addition the language specific analyzers that are included
+ * by using the option {@link Options#INCLUDE_DEFAULTS}
+ *
+ *
+ * @author jeremycarroll
+ *
+ */
+public class ConfigurableAnalyzerFactory implements IAnalyzerFactory {
+ final private static transient Logger log = Logger.getLogger(ConfigurableAnalyzerFactory.class);
+
/**
 * A parsed RFC 4647 language range (e.g. "zh-Hant" or "*").
 * <p>
 * Instances are ordered so that more specific ranges (more subtags) sort
 * first, with the wildcard "*" last among single-subtag ranges; this ordering
 * is what makes the longest-match rule in the class javadoc work after the
 * configured ranges are sorted. Matching of a concrete language tag against
 * a range uses "Extended Filtering" per RFC 4647, section 3.3.2.
 */
private static class LanguageRange implements Comparable<LanguageRange> {

    // The range split into its subtags, e.g. {"zh", "hant"}.
    private final String range[];
    // The original, unsplit range string (also the equality/hash key).
    private final String full;

    public LanguageRange(String range) {
        this.range = range.split("-");
        full = range;
    }

    /**
     * Order: longest (most subtags) first; among single-subtag ranges "*"
     * sorts last; otherwise alphabetically, subtag by subtag.
     */
    @Override
    public int compareTo(LanguageRange o) {
        if (equals(o)) {
            return 0;
        }
        int diff = o.range.length - range.length;
        if (diff != 0) {
            // longest first
            return diff;
        }
        if (range.length == 1) {
            // * last
            if (range[0].equals("*")) {
                return 1;
            }
            if (o.range[0].equals("*")) {
                return -1;
            }
        }
        // alphabetically
        for (int i=0; i<range.length; i++) {
            diff = range[i].compareTo(o.range[i]);
            if (diff != 0) {
                return diff;
            }
        }
        // Unreachable: unequal "full" strings must differ in some subtag,
        // which one of the branches above would have detected.
        throw new RuntimeException("Impossible - supposedly");
    }

    @Override
    public boolean equals(Object o) {
        return (o instanceof LanguageRange) && ((LanguageRange)o).full.equals(full);
    }
    @Override
    public int hashCode() {
        return full.hashCode();
    }

    // See RFC 4647, 3.3.2 ("Extended Filtering"): does this range match the
    // given language tag, already split into subtags?
    public boolean extendedFilterMatch(String[] language) {
        // RFC 4647 step 2
        if (!matchSubTag(language[0], range[0])) {
            return false;
        }
        int rPos = 1;
        int lPos = 1;
        // variant step - for private use flags
        if (language[0].equals("x") && range[0].equals("*")) {
            lPos = 0;
        }
        // RFC 4647 step 3
        while (rPos < range.length) {
            // step 3A
            if (range[rPos].equals("*")) {
                rPos ++;
                continue;
            }
            // step 3B
            if (lPos >= language.length) {
                return false;
            }
            // step 3C
            if (matchSubTag(language[lPos], range[rPos])) {
                lPos++;
                rPos++;
                continue;
            }
            // A single-character subtag (singleton) must not be skipped.
            if (language[lPos].length()==1) {
                return false;
            }
            lPos++;
        }
        // RFC 4647 step 4
        return true;
    }

    // RFC 4647, 3.3.2, step 1: a subtag matches itself or the wildcard.
    private boolean matchSubTag(String langSubTag, String rangeSubTag) {
        return langSubTag.equals(rangeSubTag) || "*".equals(rangeSubTag);
    }

}
/**
 * Options understood by the {@link ConfigurableAnalyzerFactory}.
 * These are factory-level properties; per-language-range properties are
 * described by {@link AnalyzerOptions} and are prefixed with {@link #ANALYZER}.
 */
public interface Options {
    /**
     * By setting this option to true, then the behavior of the legacy {@link DefaultAnalyzerFactory}
     * is added, and may be overridden by the settings of the user.
     * Specifically the following properties are loaded, prior to loading the
     * user's specification (with <code>c.b.s.C</code> expanding to
     * <code>com.bigdata.search.ConfigurableAnalyzerFactory</code>)
<pre>
c.b.s.C.analyzer.*.like=eng
c.b.s.C.analyzer.por.analyzerClass=org.apache.lucene.analysis.br.BrazilianAnalyzer
c.b.s.C.analyzer.pt.like=por
c.b.s.C.analyzer.zho.analyzerClass=org.apache.lucene.analysis.cn.ChineseAnalyzer
c.b.s.C.analyzer.chi.like=zho
c.b.s.C.analyzer.zh.like=zho
c.b.s.C.analyzer.jpn.analyzerClass=org.apache.lucene.analysis.cjk.CJKAnalyzer
c.b.s.C.analyzer.ja.like=jpn
c.b.s.C.analyzer.kor.like=jpn
c.b.s.C.analyzer.ko.like=kor
c.b.s.C.analyzer.ces.analyzerClass=org.apache.lucene.analysis.cz.CzechAnalyzer
c.b.s.C.analyzer.cze.like=ces
c.b.s.C.analyzer.cs.like=ces
c.b.s.C.analyzer.dut.analyzerClass=org.apache.lucene.analysis.nl.DutchAnalyzer
c.b.s.C.analyzer.nld.like=dut
c.b.s.C.analyzer.nl.like=dut
c.b.s.C.analyzer.deu.analyzerClass=org.apache.lucene.analysis.de.GermanAnalyzer
c.b.s.C.analyzer.ger.like=deu
c.b.s.C.analyzer.de.like=deu
c.b.s.C.analyzer.gre.analyzerClass=org.apache.lucene.analysis.el.GreekAnalyzer
c.b.s.C.analyzer.ell.like=gre
c.b.s.C.analyzer.el.like=gre
c.b.s.C.analyzer.rus.analyzerClass=org.apache.lucene.analysis.ru.RussianAnalyzer
c.b.s.C.analyzer.ru.like=rus
c.b.s.C.analyzer.tha.analyzerClass=org.apache.lucene.analysis.th.ThaiAnalyzer
c.b.s.C.analyzer.th.like=tha
c.b.s.C.analyzer.eng.analyzerClass=org.apache.lucene.analysis.standard.StandardAnalyzer
c.b.s.C.analyzer.en.like=eng
</pre>
     *
     *
     */
    String INCLUDE_DEFAULTS = ConfigurableAnalyzerFactory.class.getName() + ".includeDefaults";
    /**
     * This is the prefix to all properties configuring the individual analyzers.
     * The remainder of such a property name is a language range followed by a
     * "." and one of the {@link AnalyzerOptions} suffixes.
     */
    String ANALYZER = ConfigurableAnalyzerFactory.class.getName() + ".analyzer.";
    /**
     * Default for {@link #INCLUDE_DEFAULTS}.
     * If there is no configuration at all, then the defaults are included,
     * but any configuration at all totally replaces the defaults, unless
     * {@link #INCLUDE_DEFAULTS}
     * is explicitly set to true.
     */
    String DEFAULT_INCLUDE_DEFAULTS = "false";
}
/**
 * Options understood by analyzers created by {@link ConfigurableAnalyzerFactory}.
 * These option names are the suffix appended (after a ".") to
 * {@link Options#ANALYZER} plus an RFC 4647 language range.
 */
public interface AnalyzerOptions {
    /**
     * If specified this is the fully qualified name of a subclass of {@link Analyzer}
     * that has appropriate constructors.
     * Either this or {@link #LIKE} or {@link #PATTERN} must be specified for each language range.
     */
    String ANALYZER_CLASS = "analyzerClass";

    /**
     * The value of this property is a language range, for which
     * an analyzer is defined.
     * Treat this language range in the same way as the specified
     * language range.
     *
     * {@link #LIKE} loops are not permitted.
     *
     * If this option is specified for a language range,
     * then no other option is permitted.
     */
    String LIKE = "like";

    /**
     * The value of this property is one of:
     * <dl>
     * <dt>{@link #STOPWORDS_VALUE_NONE}</dt>
     * <dd>This analyzer is used without stop words.</dd>
     * <dt>{@link #STOPWORDS_VALUE_DEFAULT}</dt>
     * <dd>Use the default setting for stopwords for this analyzer. It is an error
     * to set this value on some analyzers such as {@link SimpleAnalyzer} that do not support stop words.
     * </dd>
     * <dt>A fully qualified class name</dt>
     * <dd>... of a subclass of {@link Analyzer} which
     * has a static method <code>getDefaultStopSet()</code>, in which case, the returned set of stop words is used.
     * </dd>
     * </dl>
     * If the {@link #ANALYZER_CLASS} does not support stop words then any value other than {@link #STOPWORDS_VALUE_NONE} is an error.
     * If the {@link #ANALYZER_CLASS} does support stop words then the default value is {@link #STOPWORDS_VALUE_DEFAULT}
     */
    String STOPWORDS = "stopwords";

    String STOPWORDS_VALUE_DEFAULT = "default";

    String STOPWORDS_VALUE_NONE = "none";

    /**
     * If this property is present then the analyzer being used is a
     * {@link PatternAnalyzer} and the value is the pattern to use.
     * (Note the {@link Pattern#UNICODE_CHARACTER_CLASS} flag is enabled).
     * It is an error if a different analyzer class is specified.
     * <p>
     * BUG FIX: this constant was previously <code>".pattern"</code>, but
     * property names are split on "." before the suffix is compared against
     * these constants (see properties2analyzers / setProperty), so the
     * leading dot meant a pattern option could never be recognized.
     */
    String PATTERN = "pattern";

}
+
/**
 * The legacy {@link DefaultAnalyzerFactory} behavior, expressed as the
 * property set loaded when {@link Options#INCLUDE_DEFAULTS} applies (see the
 * abbreviated listing in the {@link Options#INCLUDE_DEFAULTS} javadoc).
 * NOTE(review): "kor" is mapped like=jpn (CJKAnalyzer) here, mirroring the
 * legacy defaults — confirm this is intentional rather than a typo.
 */
private static final String DEFAULT_PROPERTIES =
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.*.like=eng\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.por.analyzerClass=org.apache.lucene.analysis.br.BrazilianAnalyzer\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.pt.like=por\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.zho.analyzerClass=org.apache.lucene.analysis.cn.ChineseAnalyzer\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.chi.like=zho\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.zh.like=zho\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.jpn.analyzerClass=org.apache.lucene.analysis.cjk.CJKAnalyzer\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.ja.like=jpn\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.kor.like=jpn\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.ko.like=kor\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.ces.analyzerClass=org.apache.lucene.analysis.cz.CzechAnalyzer\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.cze.like=ces\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.cs.like=ces\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.dut.analyzerClass=org.apache.lucene.analysis.nl.DutchAnalyzer\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.nld.like=dut\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.nl.like=dut\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.deu.analyzerClass=org.apache.lucene.analysis.de.GermanAnalyzer\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.ger.like=deu\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.de.like=deu\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.gre.analyzerClass=org.apache.lucene.analysis.el.GreekAnalyzer\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.ell.like=gre\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.el.like=gre\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.rus.analyzerClass=org.apache.lucene.analysis.ru.RussianAnalyzer\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.ru.like=rus\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.tha.analyzerClass=org.apache.lucene.analysis.th.ThaiAnalyzer\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.th.like=tha\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.eng.analyzerClass=org.apache.lucene.analysis.standard.StandardAnalyzer\n" +
    "com.bigdata.search.ConfigurableAnalyzerFactory.analyzer.en.like=eng\n";
+
/**
 * A language range together with the two {@link Analyzer} instances used for
 * it: one that filters stop words and one that does not. Pairs are ordered by
 * their {@link LanguageRange} so that sorting puts the most specific range first.
 */
private static class AnalyzerPair implements Comparable<AnalyzerPair>{
    private final LanguageRange range;
    private final Analyzer withStopWords;
    private final Analyzer withoutStopWords;

    AnalyzerPair(String range, Analyzer withStopWords, Analyzer withOutStopWords) {
        this.range = new LanguageRange(range);
        this.withStopWords = withStopWords;
        this.withoutStopWords = withOutStopWords;
    }

    /** Re-bind an existing pair's analyzers to a different language range (used for "like"). */
    AnalyzerPair(String range, AnalyzerPair copyMe) {
        this.range = new LanguageRange(range);
        this.withStopWords = copyMe.withStopWords;
        this.withoutStopWords = copyMe.withoutStopWords;

    }

    public Analyzer getAnalyzer(boolean filterStopwords) {
        return filterStopwords ? withStopWords : withoutStopWords;
    }
    @Override
    public String toString() {
        return range.full + "=(" + withStopWords.getClass().getSimpleName() +")";
    }


    /**
     * Build both analyzers from the same constructor: once with the given
     * params, once with any Set param replaced by the empty stop word set.
     */
    AnalyzerPair(String range, Constructor<? extends Analyzer> cons, Object ... params) throws Exception {
        this(range, cons.newInstance(params), cons.newInstance(useEmptyStopWordSet(params)));
    }
    /** For analyzers without stop word support: one instance serves both roles. */
    AnalyzerPair(String range, Analyzer stopWordsNotSupported) {
        this(range, stopWordsNotSupported, stopWordsNotSupported);
    }
    // Copy the params, substituting the empty set for any stop word Set argument.
    private static Object[] useEmptyStopWordSet(Object[] params) {
        Object rslt[] = new Object[params.length];
        for (int i=0; i<params.length; i++) {
            if (params[i] instanceof Set) {
                rslt[i] = Collections.EMPTY_SET;
            } else {
                rslt[i] = params[i];
            }
        }
        return rslt;
    }
    @Override
    public int compareTo(AnalyzerPair o) {
        return range.compareTo(o.range);
    }

    /** Delegates RFC 4647 extended filtering to the underlying range. */
    public boolean extendedFilterMatch(String[] language) {
        return range.extendedFilterMatch(language);
    }
}
+
+
/**
 * An {@link AnalyzerPair} for analyzer classes that expose a
 * (Version, Set) constructor: the configured stop word set is passed
 * for the stop-word-filtering instance.
 */
private static class VersionSetAnalyzerPair extends AnalyzerPair {
    public VersionSetAnalyzerPair(ConfigOptionsToAnalyzer lro,
            Class<? extends Analyzer> cls) throws Exception {
        super(lro.languageRange, getConstructor(cls, Version.class, Set.class), Version.LUCENE_CURRENT, lro.getStopWords());
    }
}
+
/**
 * An {@link AnalyzerPair} for analyzer classes that expose only a (Version)
 * constructor; stop words are not configurable, so the single instance
 * serves both the with- and without-stopwords roles.
 */
private static class VersionAnalyzerPair extends AnalyzerPair {

    public VersionAnalyzerPair(String range, Class<? extends Analyzer> cls) throws Exception {
        super(range, getConstructor(cls, Version.class).newInstance(Version.LUCENE_CURRENT));
    }
}
+
+
/**
 * An {@link AnalyzerPair} wrapping Lucene's {@link PatternAnalyzer}; the
 * configured regex is compiled with {@link Pattern#UNICODE_CHARACTER_CLASS}
 * enabled, and the pattern is used to identify tokens (third ctor arg true).
 */
private static class PatternAnalyzerPair extends AnalyzerPair {

    public PatternAnalyzerPair(ConfigOptionsToAnalyzer lro, String pattern) throws Exception {
        super(lro.languageRange, getConstructor(PatternAnalyzer.class,Version.class,Pattern.class,Boolean.TYPE,Set.class),
                Version.LUCENE_CURRENT,
                Pattern.compile(pattern, Pattern.UNICODE_CHARACTER_CLASS),
                true,
                lro.getStopWords());
    }
}
+
+
+ /**
+ * This class is initialized with the config options, using the {@link #setProperty(String, String)}
+ * method, for a particular language range and works out which pair of {@link Analyzer}s
+ * to use for that language range.
+ * @author jeremycarroll
+ *
+ */
+ private static class ConfigOptionsToAnalyzer {
+
+ String like;
+ String className;
+ String stopwords;
+ String pattern;
+ final String languageRange;
+ AnalyzerPair result;
+
+ public ConfigOptionsToAnalyzer(String languageRange) {
+ this.languageRange = languageRange;
+ }
+
+ /**
+ * This is called only when we have already identified that
+ * the class does support stopwords.
+ * @return
+ */
+ public Set<?> getStopWords() {
+
+ if (AnalyzerOptions.STOPWORDS_VALUE_NONE.equals(stopwords))
+ return Collections.EMPTY_SET;
+
+ if (useDefaultStopWords()) {
+ return getStopWordsForClass(className);
+ }
+
+ return getStopWordsForClass(stopwords);
+ }
+
+ protected Set<?> getStopWordsForClass(String clazzName) {
+ Class<? extends Analyzer> analyzerClass = getAnalyzerClass(clazzName);
+ try {
+ return (Set<?>) analyzerClass.getMethod("getDefaultStopSet").invoke(null);
+ } catch (Exception e) {
+ if (StandardAnalyzer.class.equals(analyzerClass)) {
+ return StandardAnalyzer.STOP_WORDS_SET;
+ }
+ if (StopAnalyzer.class.equals(analyzerClass)) {
+ return StopAnalyzer.ENGLISH_STOP_WORDS_SET;
+ }
+ throw new RuntimeException("Failed to find stop words from " + clazzName + " for language range "+languageRange);
+ }
+ }
+
+ protected boolean useDefaultStopWords() {
+ return stopwords == null || AnalyzerOptions.STOPWORDS_VALUE_DEFAULT.equals(stopwords);
+ }
+
+ public boolean setProperty(String shortProperty, String value) {
+ if (shortProperty.equals(AnalyzerOptions.LIKE) ) {
+ like = value;
+ } else if (shortProperty.equals(AnalyzerOptions.ANALYZER_CLASS) ) {
+ className = value;
+ } else if (shortProperty.equals(AnalyzerOptions.STOPWORDS) ) {
+ stopwords = value;
+ } else if (shortProperty.equals(AnalyzerOptions.PATTERN) ) {
+ pattern = value;
+ } else {
+ return false;
+ }
+ return true;
+ }
+
+ public void validate() {
+ if (pattern != null ) {
+ if ( className != null && className != PatternAnalyzer.class.getName()) {
+ throw new RuntimeException("Bad Option: Language range "+languageRange + " with pattern propety for class "+ className);
+ }
+ className = PatternAnalyzer.class.getName();
+ }
+ if (PatternAnalyzer.class.getName().equals(className) && pattern == null ) {
+ throw new RuntimeException("Bad Option: Language range "+languageRange + " must specify pattern for PatternAnalyzer.");
+ }
+ if ( (like != null) == (className != null) ) {
+ throw new RuntimeException("Bad Option: Language range "+languageRange + " must specify exactly one of implementation class or like.");
+ }
+ if (stopwords != null && like != null) {
+ throw new RuntimeException("Bad Option: Language range "+languageRange + " must not specify stopwords with like.");
+ }
+
+ }
+
+ private AnalyzerPair construct() throws Exception {
+ if (className == null) {
+ return null;
+ }
+ if (pattern != null) {
+ return new PatternAnalyzerPair(this, pattern);
+
+ }
+ final Class<? extends Analyzer> cls = getAnalyzerClass();
+
+ if (hasConstructor(cls, Version.class, Set.class)) {
+
+ // RussianAnalyzer is missing any way to access stop words.
+ if (RussianAnalyzer.class.equals(cls) && useDefaultStopWords()) {
+ return new AnalyzerPair(languageRange, new RussianAnalyzer(Version.LUCENE_CURRENT, Collections.EMPTY_SET), new RussianAnalyzer(Version.LUCENE_CURRENT));
+ }
+ return new VersionSetAnalyzerPair(this, cls);
+ }
+
+ if (stopwords != null && !stopwords.equals(AnalyzerOptions.STOPWORDS_VALUE_NONE)) {
+ throw new RuntimeException("Bad option: language range: " + languageRange + " stopwords are not supported by " + className);
+ }
+ if (hasConstructor(cls, Version.class)) {
+ return new VersionAnalyzerPair(languageRange, cls);
+ }
+
+ if (hasConstructor(cls)) {
+ return new AnalyzerPair(languageRange, cls.newInstance());
+ }
+ throw new RuntimeException("Bad option: cannot find constructor for class " + className + " for language range " + languageRange);
+ }
+
+ protected Class<? extends Analyzer> getAnalyzerClass() {
+ return getAnalyzerClass(className);
+ }
+
+ @SuppressWarnings("unchecked")
+ protected Class<? extends Analyzer> getAnalyzerClass(String className2) {
+ final Class<? extends Analyzer> cls;
+ try {
+ cls = (Class<? extends Analyzer>) Class.forName(className2);
+ } catch (ClassNotFoundException e) {
+ throw new RuntimeException("Bad option: cannot find class " + className2 + " for language range " + languageRange, e);
+ }
+ return cls;
+ }
+
+ void setAnalyzerPair(AnalyzerPair ap) {
+ result = ap;
+ }
+
+ AnalyzerPair followLikesToAnalyzerPair(int depth, int max,
+ Map<String, ConfigOptionsToAnalyzer> analyzers) {
+ if (result == null) {
+ if (depth == max) {
+ throw new RuntimeException("Bad configuration: - 'like' loop for language range " + languageRange);
+ }
+ ConfigOptionsToAnalyzer next = analyzers.get(like);
+ if (next == null) {
+ throw new RuntimeException("Bad option: - 'like' not found for language range " + languageRange+ " (not found: '"+ like +"')");
+ }
+ result = new AnalyzerPair(languageRange, next.followLikesToAnalyzerPair(depth+1, max, analyzers));
+ }
+ return result;
+ }
+
+ }
+
// All configured analyzer pairs; sorted by the constructor (Arrays.sort) so
// that more specific language ranges come first per LanguageRange.compareTo.
private final AnalyzerPair config[];

// Cache from language tag to the matched pair; concurrent map, presumably
// populated on lookup by getAnalyzer (not visible in this chunk) — confirm.
private final Map<String, AnalyzerPair> langTag2AnalyzerPair = new ConcurrentHashMap<String, AnalyzerPair>();

/**
 * While it would be very unusual to have more than 500 different language tags in a store
 * it is possible - we use a max size to prevent a memory explosion, and a naive caching
 * strategy so the code will still work on the {@link #MAX_LANG_CACHE_SIZE}+1 th entry.
 */
private static final int MAX_LANG_CACHE_SIZE = 500;

// Language assumed for plain (untagged) literals; derived from the database
// locale, or "en" when Unicode is unsupported (see getDefaultLanguage).
private final String defaultLanguage;
+
+
/**
 * Eagerly builds every configured {@link AnalyzerPair} from the effective
 * properties of the given index; subsequent analyzer requests are lookups only.
 *
 * @param fullTextIndex
 *            the index whose properties configure this factory (required).
 * @throws IllegalArgumentException
 *             if {@code fullTextIndex} is null.
 * @throws RuntimeException
 *             if the configuration is invalid, e.g. no "*" language range.
 */
public ConfigurableAnalyzerFactory(final FullTextIndex<?> fullTextIndex) {
    // despite our name, we actually make all the analyzers now, and getAnalyzer method is merely a lookup.

    if (fullTextIndex == null)
        throw new IllegalArgumentException();

    defaultLanguage = getDefaultLanguage(fullTextIndex);

    // Effective configuration: defaults (maybe) plus the user's properties.
    final Properties properties = initProperties(fullTextIndex);

    final Map<String, ConfigOptionsToAnalyzer> analyzers = new HashMap<String, ConfigOptionsToAnalyzer>();

    // Parse each "analyzer." property into a per-range configuration object.
    properties2analyzers(properties, analyzers);

    if (!analyzers.containsKey("*")) {
        throw new RuntimeException("Bad config: must specify behavior on language range '*'");
    }

    // Check option combinations before any construction is attempted.
    for (ConfigOptionsToAnalyzer a: analyzers.values()) {
        a.validate();
    }

    // Instantiate analyzers for ranges configured with an explicit class.
    try {
        for (ConfigOptionsToAnalyzer a: analyzers.values()) {
            a.setAnalyzerPair(a.construct());
        }
    } catch (Exception e) {
        throw new RuntimeException("Cannot construct ConfigurableAnalyzerFactory", e);
    }
    // Resolve "like" chains; sz bounds the recursion so loops are detected.
    int sz = analyzers.size();
    for (ConfigOptionsToAnalyzer a: analyzers.values()) {
        a.followLikesToAnalyzerPair(0, sz, analyzers);
    }

    // Sort so that the most specific language range is matched first.
    config = new AnalyzerPair[sz];
    int i = 0;
    for (ConfigOptionsToAnalyzer a: analyzers.values()) {
        config[i++] = a.result;
    }
    Arrays.sort(config);
    if (log.isInfoEnabled()) {
        StringBuilder sb = new StringBuilder();
        sb.append("Installed text Analyzer's: ");
        for (AnalyzerPair ap: config) {
            sb.append(ap.toString());
            sb.append(", ");
        }
        log.info(sb.toString());
    }
}
+
/**
 * Determine the language assumed for plain (untagged) literals: the language
 * of the database's configured locale when Unicode is supported, else "en".
 */
private String getDefaultLanguage(final FullTextIndex<?> fullTextIndex) {

    final IKeyBuilder keyBuilder = fullTextIndex.getKeyBuilder();


    if (keyBuilder.isUnicodeSupported()) {

        // The configured locale for the database.
        final Locale locale = ((KeyBuilder) keyBuilder)
            .getSortKeyGenerator().getLocale();

        // The analyzer for that locale.
        return locale.getLanguage();

    } else {
        // Rule, Britannia!
        return "en";

    }
}
+
+ private static boolean hasConstructor(Class<? extends Analyzer> cls, Class<?> ... parameterTypes) {
+ return getConstructor(cls, parameterTypes) != null;
+ }
+
+ protected static Constructor<? extends Analyzer> getConstructor(Class<? extends Analyzer> cls,
+ Class<?>... parameterTypes) {
+ try {
+ return cls.getConstructor(parameterTypes);
+ } catch (NoSuchMethodException | SecurityException e) {
+ return null;
+ }
+ }
+
+ private void properties2analyzers(Properties props, Map<String, ConfigOptionsToAnalyzer> analyzers) {
+
+ Enumeration<?> en = props.propertyNames();
+ while (en.hasMoreElements()) {
+
+ String prop = (String)en.nextElement();
+ if (prop.equals(Options.INCLUDE_DEFAULTS)) continue;
+ if (prop.startsWith(Options.ANALYZER)) {
+ String languageRangeAndProperty[] = prop.substring(Options.ANALYZER.length()).split("[.]");
+ if (languageRangeAndProperty.length == 2) {
+
+ String languageRange = languageRangeAndProperty[0].toLowerCase(Locale.US); // Turkish "I" could create a problem
+ String shortProperty = languageRangeAndProperty[1];
+ String value = props.getProperty(prop);
+ log.info("Setting language range: " + languageRange + "/" + shortProperty + " = " + value);
+ ConfigOptionsToAnalyzer cons = analyzers.get(languageRange);
+ if (cons == null) {
+ cons = new ConfigOptionsToAnalyzer(languageRange);
+ analyzers.put(languageRange, cons);
+ }
+ if (cons.setProperty(shortProperty, value)) {
+ continue;
+ }
+ }
+ }
+
+ log.warn("Failed to process configuration property: " + prop);
+ }
+
+ }
+
+ protected Properties initProperties(final FullTextIndex<?> fullTextIndex) {
+ final Prope...
[truncated message content] |
|
From: <jer...@us...> - 2014-05-07 15:57:56
|
Revision: 8222
http://sourceforge.net/p/bigdata/code/8222
Author: jeremy_carroll
Date: 2014-05-07 15:57:53 +0000 (Wed, 07 May 2014)
Log Message:
-----------
removed unnecessary UTF-8 encoding pref
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs
Deleted: branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs 2014-05-07 15:39:17 UTC (rev 8221)
+++ branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs 2014-05-07 15:57:53 UTC (rev 8222)
@@ -1,2 +0,0 @@
-eclipse.preferences.version=1
-encoding//bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java=UTF-8
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java 2014-05-07 15:39:17 UTC (rev 8221)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java 2014-05-07 15:57:53 UTC (rev 8222)
@@ -544,7 +544,7 @@
final Class<? extends Analyzer> cls = getAnalyzerClass();
if (hasConstructor(cls, Version.class, Set.class)) {
-
+
// RussianAnalyzer is missing any way to access stop words.
if (RussianAnalyzer.class.equals(cls) && useDefaultStopWords()) {
return new AnalyzerPair(languageRange, new RussianAnalyzer(Version.LUCENE_CURRENT, Collections.EMPTY_SET), new RussianAnalyzer(Version.LUCENE_CURRENT));
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <mrp...@us...> - 2014-05-07 20:25:03
|
Revision: 8223
http://sourceforge.net/p/bigdata/code/8223
Author: mrpersonick
Date: 2014-05-07 20:24:56 +0000 (Wed, 07 May 2014)
Log Message:
-----------
Commit of Blueprints/Gremlin support. See ticket 913.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/.classpath
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java
branches/BIGDATA_RELEASE_1_3_0/build.properties
branches/BIGDATA_RELEASE_1_3_0/build.xml
branches/BIGDATA_RELEASE_1_3_0/pom.xml
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/jettison-license.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/jettison-1.3.3.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphEmbedded.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataVertex.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BlueprintsRDFFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/edge.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/edges.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/edgesByProperty.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/vertex.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/graph-example-1.xml
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataBlueprintsGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEventTransactionalGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataVertex.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/QueryManager.java
Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-07 15:57:53 UTC (rev 8222)
+++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-07 20:24:56 UTC (rev 8223)
@@ -1,16 +1,19 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
+ <classpathentry kind="src" path="bigdata/src/java"/>
<classpathentry kind="src" path="bigdata-rdf/src/java"/>
+ <classpathentry kind="src" path="bigdata-sails/src/java"/>
+ <classpathentry kind="src" path="bigdata-blueprints/src/java"/>
+ <classpathentry kind="src" path="bigdata/src/test"/>
+ <classpathentry kind="src" path="bigdata-rdf/src/test"/>
+ <classpathentry kind="src" path="bigdata-sails/src/test"/>
+ <classpathentry kind="src" path="bigdata-blueprints/src/test"/>
+ <classpathentry kind="src" path="bigdata-war/src"/>
+ <classpathentry kind="src" path="bigdata/src/resources/logging"/>
<classpathentry kind="src" path="bigdata-rdf/src/samples"/>
<classpathentry kind="src" path="dsi-utils/src/java"/>
- <classpathentry kind="src" path="bigdata/src/resources/logging"/>
<classpathentry kind="src" path="bigdata-sails/src/samples"/>
<classpathentry kind="src" path="bigdata-jini/src/test"/>
- <classpathentry kind="src" path="bigdata-sails/src/java"/>
- <classpathentry kind="src" path="bigdata/src/java"/>
- <classpathentry kind="src" path="bigdata-rdf/src/test"/>
- <classpathentry kind="src" path="bigdata/src/test"/>
- <classpathentry kind="src" path="bigdata-sails/src/test"/>
<classpathentry kind="src" path="bigdata-jini/src/java"/>
<classpathentry kind="src" path="contrib/src/problems"/>
<classpathentry kind="src" path="bigdata/src/samples"/>
@@ -21,7 +24,6 @@
<classpathentry kind="src" path="junit-ext/src/java"/>
<classpathentry kind="src" path="lgpl-utils/src/java"/>
<classpathentry kind="src" path="lgpl-utils/src/test"/>
- <classpathentry kind="src" path="bigdata-war/src"/>
<classpathentry kind="src" path="bigdata-ganglia/src/java"/>
<classpathentry kind="src" path="bigdata-ganglia/src/test"/>
<classpathentry kind="src" path="bigdata-rdf/src/resources/service-providers"/>
@@ -92,5 +94,8 @@
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar" sourcepath="/Users/bryan/Downloads/org.eclipse.jetty.project-jetty-9.1.4.v20140401"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata-sails/lib/jackson-core-2.2.3.jar"/>
+ <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-core-2.4.0.jar"/>
+ <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-test-2.4.0.jar"/>
+ <classpathentry kind="lib" path="bigdata-blueprints/lib/jettison-1.3.3.jar"/>
<classpathentry kind="output" path="bin"/>
</classpath>
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/jettison-license.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/jettison-license.txt (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/jettison-license.txt 2014-05-07 20:24:56 UTC (rev 8223)
@@ -0,0 +1,13 @@
+Copyright 2006 Envoi Solutions LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/jettison-license.txt
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar 2014-05-07 15:57:53 UTC (rev 8222)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar 2014-05-07 20:24:56 UTC (rev 8223)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/jettison-1.3.3.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/jettison-1.3.3.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/jettison-1.3.3.jar 2014-05-07 15:57:53 UTC (rev 8222)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/jettison-1.3.3.jar 2014-05-07 20:24:56 UTC (rev 8223)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/jettison-1.3.3.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataBlueprintsGraph.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataBlueprintsGraph.java 2014-05-07 15:57:53 UTC (rev 8222)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataBlueprintsGraph.java 2014-05-07 20:24:56 UTC (rev 8223)
@@ -1,141 +0,0 @@
-package com.bigdata.blueprints;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-
-import sun.reflect.generics.reflectiveObjects.NotImplementedException;
-
-import com.tinkerpop.blueprints.Edge;
-import com.tinkerpop.blueprints.Features;
-import com.tinkerpop.blueprints.GraphQuery;
-import com.tinkerpop.blueprints.TransactionalGraph;
-import com.tinkerpop.blueprints.Vertex;
-
-
-public abstract class BigdataBlueprintsGraph implements BigdataEventTransactionalGraph {
- // elements that we will be deleting from the store
- private ArrayList<BigdataElement> removedElements = new ArrayList<BigdataElement>();
- // vertices that we will be adding to the store
- private HashMap<String,BigdataVertex> addedVertices = new HashMap<String,BigdataVertex>();
- // elements that we will be adding to the store
- private HashMap<String,BigdataEdge> addedEdges = new HashMap<String,BigdataEdge>();
- private QueryManager qm = null;
-
- public BigdataBlueprintsGraph () { }
-
- public BigdataBlueprintsGraph (QueryManager qm) { this.qm = qm; }
-
- public void setQueryManager(QueryManager qm) { this.qm = qm; }
- public QueryManager getQueryManager() { return qm; }
-
- public void commit() {
- // form and submit query
- //
- //
- //
- throwUnimplemented( "commit" );
- }
-
- public void rollback() {
- throwUnimplemented( "rollback" );
- }
-
- public void stopTransaction(TransactionalGraph.Conclusion conclusion) {
- throwUnimplemented( "stopTransaction" );
- }
-
- public void shutdown() {
- throwUnimplemented( "shutdown" );
- }
-
- public Vertex getVertex(Object id) {
- // we can only remove an item from the "add" queue
- return addedVertices.get( (String) id );
- }
-
- public BigdataBlueprintsGraph getBasseGraph() { return this; }
-
- public Edge addEdge(Object id, BigdataVertex outVertex, BigdataVertex inVertex, String label) {
- BigdataEdge edge = new BigdataEdge( (String)id, outVertex, inVertex, label );
- addedEdges.put((String)id, edge);
- return edge;
- }
-
- public Features getFeatures() {
- throwUnimplemented( "getFeatures" );
- return (Features)null;
- }
-
- public Vertex addVertex(Object id) {
- BigdataVertex v = new BigdataVertex( (String)id );
- addedVertices.put( (String)id, v );
- return v;
- }
-
- public void removeVertex(BigdataVertex vertex) {
- addedVertices.remove( vertex.getId() ); // if present
- removedElements.add( vertex );
- }
-
- public Iterable<Vertex> getVertices(String key, Object value) {
- throwUnimplemented( "getVertices(String key, Object value)" );
- return (Iterable<Vertex>)null;
- }
-
- public Iterable<Vertex> getVertices() {
- // we only return what is in the "add" queue
- final List<Vertex> vertexList = new ArrayList<Vertex>();
- vertexList.addAll( addedVertices.values() );
- return vertexList;
- }
-
- public Edge getEdge(Object id) {
- // we can only remove an item from the "add" queue
- return addedEdges.get( (String) id );
- }
-
- public void removeEdge(BigdataEdge edge) {
- addedEdges.remove( edge.getId() ); // if present
- removedElements.add( edge );
- }
-
- public Iterable<Edge> getEdges(String key, Object value) {
- throwUnimplemented( "getEdges(String key, Object value)" );
- return (Iterable<Edge>)null;
- }
-
- public Iterable<Edge> getEdges() {
- // we only return what is in the add queue
- final List<Edge> edgeList = new ArrayList<Edge>();
- edgeList.addAll( addedEdges.values() );
- return edgeList;
- }
-
- public GraphQuery query() {
- throwUnimplemented( "queries" );
- return (GraphQuery)null;
- }
-
- // @SuppressWarnings("deprecation")
- private void throwUnimplemented(String method) {
- // unchecked( new Exception( "The '" + method + "' has not been implemented." ) );
- throw new NotImplementedException();
- }
-
-
- /* Maybe use later
- *
- public static RuntimeException unchecked(Throwable e) {
- BigdataBlueprintsGraph.<RuntimeException>throwAny(e);
- return null;
- }
-
- @SuppressWarnings("unchecked")
- private static <E extends Throwable> void throwAny(Throwable e) throws E {
- throw (E)e;
- }
- */
-
-}
-
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-05-07 15:57:53 UTC (rev 8222)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-05-07 20:24:56 UTC (rev 8223)
@@ -1,52 +0,0 @@
-package com.bigdata.blueprints;
-
-import sun.reflect.generics.reflectiveObjects.NotImplementedException;
-
-import com.tinkerpop.blueprints.Direction;
-import com.tinkerpop.blueprints.Edge;
-import com.tinkerpop.blueprints.Vertex;
-
-
-public class BigdataEdge extends BigdataElement implements Edge {
-
- protected BigdataVertex vOut = null;
- protected BigdataVertex vIn = null;
-
-
- public BigdataEdge(String id) {
- super(id);
- }
-
- public BigdataEdge(String id, String label) {
- super(id,label);
- }
-
- public BigdataEdge(String id, BigdataVertex out, BigdataVertex in, String label) {
- super(id,label);
- this.vOut = out;
- this.vIn = in;
- }
-
- public Vertex getVertex(Direction direction) throws IllegalArgumentException {
- if( direction == Direction.IN ) {
- return vIn;
- }
- else if( direction == Direction.OUT ) {
- return vOut;
- }
- else {
- throw new NotImplementedException();
- }
- }
-
- @Override
- public void remove() {
- throw new NotImplementedException();
- }
-
- public String toString() {
- // toTTLString();
- return "Not Implemented";
- }
-
-}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-05-07 20:24:56 UTC (rev 8223)
@@ -0,0 +1,107 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.blueprints;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.openrdf.model.Statement;
+import org.openrdf.model.URI;
+import org.openrdf.model.vocabulary.RDFS;
+
+import com.tinkerpop.blueprints.Direction;
+import com.tinkerpop.blueprints.Edge;
+import com.tinkerpop.blueprints.Vertex;
+
+/**
+ * Edge implementation that wraps an Edge statement and points to a
+ * {@link BigdataGraph} instance.
+ *
+ * @author mikepersonick
+ *
+ */
+public class BigdataEdge extends BigdataElement implements Edge {
+
+ private static final List<String> blacklist = Arrays.asList(new String[] {
+ "id", "", "label"
+ });
+
+ protected final Statement stmt;
+
+ public BigdataEdge(final Statement stmt, final BigdataGraph graph) {
+ super(stmt.getPredicate(), graph);
+
+ this.stmt = stmt;
+ }
+
+ @Override
+ public Object getId() {
+ return graph.factory.fromEdgeURI(uri);
+ }
+
+ @Override
+ public void remove() {
+ graph.removeEdge(this);
+ }
+
+ @Override
+ public String getLabel() {
+ return (String) graph.getProperty(uri, RDFS.LABEL);
+ }
+
+ @Override
+ public Vertex getVertex(final Direction dir) throws IllegalArgumentException {
+
+ if (dir == Direction.BOTH) {
+ throw new IllegalArgumentException();
+ }
+
+ final URI uri = (URI)
+ (dir == Direction.OUT ? stmt.getSubject() : stmt.getObject());
+
+ final String id = graph.factory.fromVertexURI(uri);
+
+ return graph.getVertex(id);
+
+ }
+
+ @Override
+ public void setProperty(final String property, final Object val) {
+
+ if (property == null || blacklist.contains(property)) {
+ throw new IllegalArgumentException();
+ }
+
+ super.setProperty(property, val);
+
+ }
+
+ @Override
+ public String toString() {
+ final URI s = (URI) stmt.getSubject();
+ final URI p = (URI) stmt.getPredicate();
+ final URI o = (URI) stmt.getObject();
+ return "e["+p.getLocalName()+"]["+s.getLocalName()+"->"+o.getLocalName()+"]";
+ }
+
+}
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-05-07 15:57:53 UTC (rev 8222)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-05-07 20:24:56 UTC (rev 8223)
@@ -1,70 +0,0 @@
-package com.bigdata.blueprints;
-
-import sun.reflect.generics.reflectiveObjects.NotImplementedException;
-
-import java.util.HashMap;
-import java.util.Set;
-
-import org.openrdf.model.vocabulary.RDFS;
-import com.tinkerpop.blueprints.Element;
-
-public class BigdataElement implements Element {
-
- protected String id = null; // must be a URI
-
- // implied here is that the properties exist in the graph store, we would need a 2nd property setter
- private HashMap<String,String> properties = new HashMap<String,String>();
- // properties that we will be deleting from the store
- private HashMap<String,String> removedProperties = new HashMap<String,String>();
- // properties that we will be adding to the store
- private HashMap<String,String> addedProperties = new HashMap<String,String>();
-
- public BigdataElement(String id) {
- this.id = id;
- }
-
- public BigdataElement(String id, String label) {
- this.id = id;
- setProperty( RDFS.LABEL.toString(), label );
- }
-
- @SuppressWarnings("unchecked")
- public <T> T getProperty(String key) {
- return (T) properties.get(key);
- }
-
- public Set<String> getPropertyKeys() {
- Set<String> keys = properties.keySet();
- keys.addAll( addedProperties.keySet() );
- return keys;
- }
-
- public void setProperty(String key, Object value) {
- addedProperties.put(key,(String)value );
- properties.put(key, (String)value);
- }
-
- @SuppressWarnings("unchecked")
- public <T> T removeProperty(String key) {
- removedProperties.put(key, key);
- return (T) properties.remove(key);
- }
-
- public void remove() {
- // delete from graph
- throw new NotImplementedException();
- }
-
- public Object getId() {
- return id;
- }
-
- public boolean equals(Object obj) {
- return obj.toString().equals(this.toString());
- }
-
- public String getLabel() {
- return getProperty( RDFS.LABEL.toString() );
- }
-
-}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-05-07 20:24:56 UTC (rev 8223)
@@ -0,0 +1,134 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.blueprints;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+import org.openrdf.model.Literal;
+import org.openrdf.model.URI;
+
+import com.tinkerpop.blueprints.Element;
+
+/**
+ * Base class for {@link BigdataVertex} and {@link BigdataEdge}. Handles
+ * property-related methods.
+ *
+ * @author mikepersonick
+ *
+ */
+public abstract class BigdataElement implements Element {
+
+ private static final List<String> blacklist = Arrays.asList(new String[] {
+ "id", ""
+ });
+
+ protected final URI uri;
+ protected final BigdataGraph graph;
+
+ public BigdataElement(final URI uri, final BigdataGraph graph) {
+ this.uri = uri;
+ this.graph = graph;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public <T> T getProperty(final String property) {
+
+ final URI p = graph.factory.toPropertyURI(property);
+
+ return (T) graph.getProperty(uri, p);
+
+ }
+
+ @Override
+ public Set<String> getPropertyKeys() {
+
+ return graph.getPropertyKeys(uri);
+
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public <T> T removeProperty(final String property) {
+
+ final URI p = graph.factory.toPropertyURI(property);
+
+ return (T) graph.removeProperty(uri, p);
+
+ }
+
+ @Override
+ public void setProperty(final String property, final Object val) {
+
+ if (property == null || blacklist.contains(property)) {
+ throw new IllegalArgumentException();
+ }
+
+ final URI p = graph.factory.toPropertyURI(property);
+
+ final Literal o = graph.factory.toLiteral(val);
+
+ graph.setProperty(uri, p, o);
+
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((graph == null) ? 0 : graph.hashCode());
+ result = prime * result + ((uri == null) ? 0 : uri.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ BigdataElement other = (BigdataElement) obj;
+ if (graph == null) {
+ if (other.graph != null)
+ return false;
+ } else if (!graph.equals(other.graph))
+ return false;
+ if (uri == null) {
+ if (other.uri != null)
+ return false;
+ } else if (!uri.equals(other.uri))
+ return false;
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return uri.toString();
+ }
+
+
+}
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEventTransactionalGraph.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEventTransactionalGraph.java 2014-05-07 15:57:53 UTC (rev 8222)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEventTransactionalGraph.java 2014-05-07 20:24:56 UTC (rev 8223)
@@ -1,8 +0,0 @@
-package com.bigdata.blueprints;
-
-import com.tinkerpop.blueprints.Graph;
-import com.tinkerpop.blueprints.ThreadedTransactionalGraph;
-
-public interface BigdataEventTransactionalGraph extends Graph, ThreadedTransactionalGraph {
-
-}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-07 20:24:56 UTC (rev 8223)
@@ -0,0 +1,843 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.blueprints;
+
+import info.aduna.iteration.CloseableIteration;
+
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+
+import org.apache.commons.io.IOUtils;
+import org.openrdf.OpenRDFException;
+import org.openrdf.model.Literal;
+import org.openrdf.model.Statement;
+import org.openrdf.model.URI;
+import org.openrdf.model.Value;
+import org.openrdf.model.impl.StatementImpl;
+import org.openrdf.model.impl.URIImpl;
+import org.openrdf.model.vocabulary.RDF;
+import org.openrdf.model.vocabulary.RDFS;
+import org.openrdf.query.GraphQueryResult;
+import org.openrdf.query.QueryLanguage;
+import org.openrdf.repository.RepositoryConnection;
+import org.openrdf.repository.RepositoryResult;
+
+import com.bigdata.rdf.store.BD;
+import com.tinkerpop.blueprints.Direction;
+import com.tinkerpop.blueprints.Edge;
+import com.tinkerpop.blueprints.Features;
+import com.tinkerpop.blueprints.Graph;
+import com.tinkerpop.blueprints.GraphQuery;
+import com.tinkerpop.blueprints.Vertex;
+import com.tinkerpop.blueprints.util.DefaultGraphQuery;
+
+/**
+ * A base class for a Blueprints wrapper around a bigdata back-end.
+ *
+ * @author mikepersonick
+ *
+ */
+public abstract class BigdataGraph implements Graph {
+
+ public static final URI VERTEX = new URIImpl(BD.NAMESPACE + "Vertex");
+
+ public static final URI EDGE = new URIImpl(BD.NAMESPACE + "Edge");
+
+// final BigdataSailRepository repo;
+//
+// transient BigdataSailRepositoryConnection cxn;
+
+ final BlueprintsRDFFactory factory;
+
+// public BigdataGraph(final BigdataSailRepository repo) {
+// this(repo, BigdataRDFFactory.INSTANCE);
+// }
+
+ public BigdataGraph(//final BigdataSailRepository repo,
+ final BlueprintsRDFFactory factory) {
+// try {
+// this.repo = repo;
+// this.cxn = repo.getUnisolatedConnection();
+// this.cxn.setAutoCommit(false);
+ this.factory = factory;
+// } catch (RepositoryException ex) {
+// throw new RuntimeException(e...
[truncated message content] |