From: <tho...@us...> - 2012-10-12 14:37:09
Revision: 6672 http://bigdata.svn.sourceforge.net/bigdata/?rev=6672&view=rev Author: thompsonbry Date: 2012-10-12 14:36:58 +0000 (Fri, 12 Oct 2012) Log Message: ----------- Added support for reporting on the highly available quorum into the NSS Status page. This information now appears automatically for a highly available quourm. Service Description now correctly reports if the KB is backed by a Journal that is highly available. DumpJournal was modified to use PrintWriter rather than PrintStream. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/service/IService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/service/jini/RemoteAdministrable.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/service/jini/RemoteStorageLocationAdmin.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -91,4 +91,9 @@ IHARootBlockResponse getRootBlock(IHARootBlockRequest msg) throws IOException; + /** + * The port that the NanoSparqlServer is running on. 
+ */ + int getNSSPort() throws IOException; + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -177,4 +177,9 @@ return delegate.sendHALogForWriteSet(msg); } + @Override + public int getNSSPort() throws IOException { + return delegate.getNSSPort(); + } + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumService.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumService.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -74,6 +74,13 @@ long getLastCommitCounter(); /** + * Return the service directory. This directory has various metadata about + * the service process, but it might not contain either the data or the HA + * log files. + */ + File getServiceDir(); + + /** * Return the directory in which we are logging the write blocks. */ File getHALogDir(); @@ -98,5 +105,5 @@ * if the local service already has writes. */ void installRootBlocksFromQuorum(final IRootBlockView rootBlock); - + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -178,7 +178,7 @@ return localService; } - + @Override public Executor getExecutor() { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -5062,6 +5062,13 @@ } + @Override + public int getNSSPort() { + + throw new UnsupportedOperationException(); + + } + /** * Return a proxy object for a {@link Future} suitable for use in an RMI * environment (the default implementation returns its argument). 
Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -30,7 +30,7 @@ import java.io.File; import java.io.IOException; import java.io.InputStream; -import java.io.PrintStream; +import java.io.PrintWriter; import java.nio.ByteBuffer; import java.util.Date; import java.util.Iterator; @@ -268,18 +268,30 @@ final DumpJournal dumpJournal = new DumpJournal(journal); - dumpJournal.dumpJournal(System.out, namespaces, - dumpHistory, dumpPages, dumpIndices, showTuples); + final PrintWriter out = new PrintWriter(System.out, true/* autoFlush */); - for(Long addr : addrs) { - - System.out.println("addr=" + addr + ", offset=" - + journal.getOffset(addr) + ", length=" - + journal.getByteCount(addr)); + try { - // Best effort attempt to dump the record. - System.out.println(dumpJournal.dumpRawRecord(addr)); + dumpJournal.dumpJournal(out, namespaces, dumpHistory, + dumpPages, dumpIndices, showTuples); + for (Long addr : addrs) { + + out.println("addr=" + addr + ", offset=" + + journal.getOffset(addr) + ", length=" + + journal.getByteCount(addr)); + + // Best effort attempt to dump the record. + out.println(dumpJournal.dumpRawRecord(addr)); + + } + + out.flush(); + + } finally { + + out.close(); + } } finally { @@ -319,9 +331,20 @@ public void dumpJournal(final boolean dumpHistory, final boolean dumpPages, final boolean dumpIndices, final boolean showTuples) { - dumpJournal(System.out, null/* namespaces */, dumpHistory, dumpPages, - dumpIndices, showTuples); - + final PrintWriter w = new PrintWriter(System.out, true/* autoFlush */); + + try { + + dumpJournal(w, null/* namespaces */, dumpHistory, dumpPages, + dumpIndices, showTuples); + + w.flush(); + + } finally { + + w.close(); + } + } /** @@ -342,7 +365,7 @@ * @param showTuples * Dump the records in the indices. */ - public void dumpJournal(final PrintStream out, final List<String> namespaces, + public void dumpJournal(final PrintWriter out, final List<String> namespaces, final boolean dumpHistory, final boolean dumpPages, final boolean dumpIndices, final boolean showTuples) { @@ -575,7 +598,7 @@ } - public void dumpGlobalRowStore(final PrintStream out) { + public void dumpGlobalRowStore(final PrintWriter out) { final SparseRowStore grs = journal.getGlobalRowStore(journal .getLastCommitTime()); @@ -618,7 +641,7 @@ * @param journal * @param commitRecord */ - private void dumpNamedIndicesMetadata(final PrintStream out, + private void dumpNamedIndicesMetadata(final PrintWriter out, final List<String> namespaces, final ICommitRecord commitRecord, final boolean dumpPages, final boolean dumpIndices, final boolean showTuples) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/service/IService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/service/IService.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/service/IService.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -82,6 +82,14 @@ */ String getServiceName() throws IOException; +// /** +// * Return the service directory. +// * +// * @throws IOException +// * since you can use this method with RMI. 
+// */ +// File getServiceDirectory() throws IOException; + /** * Destroy the service. If the service is running, it is shutdown * immediately and then destroyed. This method has the same signature as Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -469,6 +469,11 @@ public void installRootBlocksFromQuorum(IRootBlockView rootBlock) { throw new UnsupportedOperationException(); } + + @Override + public File getServiceDir() { + throw new UnsupportedOperationException(); + } }; } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -324,6 +324,17 @@ } /** + * Return the service directory. + * + * @see ConfigurationOptions#SERVICE_DIR + */ + public File getServiceDir() { + + return serviceDir; + + } + + /** * <code>true</code> iff this is a persistent service (one that you can * shutdown and restart). */ Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -64,6 +64,7 @@ import com.bigdata.journal.Journal; import com.bigdata.journal.ValidationError; import com.bigdata.journal.WORMStrategy; +import com.bigdata.journal.jini.ha.HAJournalServer.NSSConfigurationOptions; import com.bigdata.quorum.Quorum; import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rwstore.RWStore; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -26,6 +26,7 @@ import java.util.concurrent.locks.ReentrantLock; import net.jini.config.Configuration; +import net.jini.config.ConfigurationException; import net.jini.core.lookup.ServiceID; import net.jini.core.lookup.ServiceItem; import net.jini.core.lookup.ServiceRegistrar; @@ -56,6 +57,7 @@ import com.bigdata.jini.util.JiniUtil; import com.bigdata.journal.IHABufferStrategy; import com.bigdata.journal.IRootBlockView; +import com.bigdata.journal.ITx; import com.bigdata.quorum.Quorum; import com.bigdata.quorum.QuorumActor; import com.bigdata.quorum.QuorumEvent; @@ -64,7 +66,6 @@ import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rdf.sail.webapp.ConfigParams; 
import com.bigdata.rdf.sail.webapp.NanoSparqlServer; -import com.bigdata.service.DataService; import com.bigdata.service.jini.FakeLifeCycle; import com.bigdata.service.jini.JiniClient; import com.bigdata.service.jini.RemoteAdministrable; @@ -656,7 +657,7 @@ final ServiceItem serviceItem = discoveryClient .getServiceItem(serviceId); - + if (serviceItem == null) { // Not found (per the API). @@ -830,6 +831,122 @@ } /** + * Rebuild the backing store from scratch. + * <p> + * If we can not replicate ALL log files for the commit points that we + * need to make up on this service, then we can not incrementally + * resynchronize this service and we will have to do a full rebuild of + * the service instead. + * <p> + * A rebuild begins by pinning the history on the quorum by asserting a + * read lock (a read-only tx against then current last commit time). + * This prevents the history from being recycled, but does not prevent + * concurrent writes on the existing backing store extent, or extension + * of the backing store. + * <p> + * While holding that read lock, we need to make a copy of the bytes in + * the backing store. This copy can be streamed. It must start at the + * first valid offset beyond the root blocks since we do not want to + * update the root blocks until we have caught up with and replayed the + * existing HA Log files. If the file is extended, we do not need to + * copy the extension. Note that the streamed copy does not represent + * any coherent commit point. However, once we apply ALL of the HA Log + * files up to the last commit time that we pinned with a read lock, + * then the local backing file will be binary consistent with that + * commit point and we apply both the starting and ending root block for + * that commit point, and finally release the read lock. + * <p> + * At this point, we are still not up to date. However, the HALog files + * required to bring us up to date should exist and we can enter the + * normal resynchronization logic. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * + * TODO We are not actually binary consistent (even if we are + * data consistent) until we have either (A) joined the met + * quourm; or (B) replayed the HA Logs up to commitCounter+1 for + * the commitCounter on the leader as of the moment that we + * finished streaming the leader's backing file to this node. + * + * Blow off the root blocks (zero commit counters). Then install + * when known synched to specific commit point and enter + * resync.+ + * + * TODO We need the ability to conditionally apply the root + * blocks when advancing through the HA Log files up to the read + * lock. + */ + private class RebuildTask implements Callable<Void> { + + private final long token; + private final S leader; + + public RebuildTask() { + + // run while quorum is met. + token = getQuorum().token(); + + // The leader for that met quorum (RMI interface). + leader = getLeader(token); + + } + + @Override + public Void call() throws Exception { + + final long readLock = leader.newTx(ITx.READ_COMMITTED); + + try { + + doRun(readLock); + + } catch (Throwable t) { + + if (InnerCause.isInnerCause(t, InterruptedException.class)) { + + log.info("Interrupted."); + + } + + log.error(t, t); + + } finally { + + // release the read lock. 
+ leader.abort(readLock); + + } + + return null; + + } + + private void doRun(final long readLock) throws Exception { + + haLog.warn("REBUILD: " + server.getServiceName()); + + /* + * Note: We need to discard any writes that might have been + * buffered before we start the resynchronization of the local + * store. + */ + + journal.doLocalAbort(); + + /* + * FIXME REBUILD : Implement logic to copy all data from + * the leader's journal (except the root block) and then + * apply the HA Log files up to the commit point pinned + * by the readLock. + */ + throw new UnsupportedOperationException(); + + } + + } // class RebuildTask + + /** * This class handles the resynchronization of a node that is not at the * same commit point as the met quorum. The task will replicate write * sets (HA Log files) from the services in the met quorum, and apply @@ -971,22 +1088,39 @@ /* * Oops. The leader does not have that log file. * - * TODO REBUILD : If we can not replicate ALL log files for - * the commit points that we need to make up on this - * service, then we can not incrementally resynchronize this - * service and we will have to do a full rebuild of the - * service instead. + * We will have to rebuild the service from scratch since we + * do not have the necessary HA Log files to synchronize + * with the existing quorum. * * TODO RESYNC : It is possible to go to another service in * the met quorum for the same log file, but it needs to be * a service that is UPSTREAM of this service. */ - // Abort the resynchronization effort. - throw new RuntimeException( - "HA Log not available: commitCounter=" - + commitCounter, ex); + final String msg = "HA Log not available: commitCounter=" + + commitCounter; + log.error(msg); + + final FutureTask<Void> ft = new FutureTaskMon<Void>( + new RebuildTask()); + + try { + + // Run service rebuild task. + journal.getExecutorService().submit(ft); + + ft.get(); + + } finally { + + ft.cancel(true/* mayInterruptIfRunning */); + + } + + // Re-enter the resync protocol. + return; + } // root block when the quorum started that write set. @@ -1554,6 +1688,11 @@ rootBlock); } + + @Override + public File getServiceDir() { + return server.getServiceDir(); + } } @@ -1680,10 +1819,13 @@ } /** - * Adds jini administration interfaces to the basic {@link DataService}. + * Adds jini administration interfaces to the basic {@link HAGlue} interface + * exposed by the {@link HAJournal}. 
* - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ + * @see HAJournal.HAGlueService + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> */ public static class AdministrableHAGlueService extends HAGlueDelegate implements RemoteAdministrable, RemoteDestroyAdmin { @@ -1875,6 +2017,27 @@ // // } + @Override + public int getNSSPort() { + + final String COMPONENT = NSSConfigurationOptions.COMPONENT; + + try { + + final Integer port = (Integer) server.config.getEntry( + COMPONENT, NSSConfigurationOptions.PORT, Integer.TYPE, + NSSConfigurationOptions.DEFAULT_PORT); + + return port; + + } catch (ConfigurationException e) { + + throw new RuntimeException(e); + + } + + } + } } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/service/jini/RemoteAdministrable.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/service/jini/RemoteAdministrable.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/service/jini/RemoteAdministrable.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.service.jini; import java.rmi.Remote; @@ -18,11 +41,6 @@ * public interface RemoteDiscoveryAdmin extends Remote, DiscoveryAdmin { * * } - * - * public interface RemoteStorageLocationAdmin extends Remote, - * StorageLocationAdmin { - * - * } * </pre> */ public interface RemoteAdministrable extends Remote, Administrable { Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/service/jini/RemoteStorageLocationAdmin.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/service/jini/RemoteStorageLocationAdmin.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/service/jini/RemoteStorageLocationAdmin.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -0,0 +1,39 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Jun 1, 2010 + */ +package com.bigdata.service.jini; + +import java.rmi.Remote; + +import com.sun.jini.admin.StorageLocationAdmin; + +/** + * Extends {@link Remote} for RMI compatibility. + */ +public interface RemoteStorageLocationAdmin extends Remote, + StorageLocationAdmin { + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -36,8 +36,8 @@ import org.openrdf.model.impl.URIImpl; import org.openrdf.model.vocabulary.RDF; +import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.IIndexManager; -import com.bigdata.journal.Journal; import com.bigdata.rdf.axioms.Axioms; import com.bigdata.rdf.axioms.NoAxioms; import com.bigdata.rdf.axioms.OwlAxioms; @@ -636,11 +636,12 @@ final IIndexManager indexManager = tripleStore.getIndexManager(); - if (indexManager instanceof Journal) { + // TODO FEDERATION HA + if (indexManager instanceof AbstractJournal) { - final Journal jnl = (Journal) indexManager; + final AbstractJournal jnl = (AbstractJournal) indexManager; - if (jnl.getQuorum() != null) { + if (jnl.isHighlyAvailable()) { g.add(aService, SD.feature, HighlyAvailable); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2012-10-11 17:55:39 UTC (rev 6671) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2012-10-12 14:36:58 UTC (rev 6672) @@ -22,9 +22,11 @@ */ package com.bigdata.rdf.sail.webapp; +import java.io.File; +import java.io.FilenameFilter; import java.io.IOException; import java.io.OutputStreamWriter; -import java.io.PrintStream; +import java.io.PrintWriter; import java.io.Writer; import java.util.Collections; import java.util.Comparator; @@ -42,6 +44,7 @@ import javax.servlet.http.HttpServletResponse; import org.apache.log4j.Logger; +import org.apache.zookeeper.KeeperException; import com.bigdata.bop.BOpUtility; import com.bigdata.bop.PipelineOp; @@ -52,16 +55,21 @@ import com.bigdata.bop.engine.QueryLog; import com.bigdata.bop.fed.QueryEngineFactory; import com.bigdata.counters.CounterSet; +import com.bigdata.ha.HAGlue; +import com.bigdata.ha.HALogWriter; +import com.bigdata.ha.QuorumService; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.DumpJournal; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.Journal; +import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rdf.sail.sparql.ast.SimpleNode; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.QueryRoot; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.util.InnerCause; +import com.bigdata.zookeeper.DumpZookeeper; /** * A status page for the service. 
@@ -328,7 +336,8 @@ w.flush(); // dump onto the response. - final PrintStream out = new PrintStream(resp.getOutputStream()); + final PrintWriter out = new PrintWriter(resp.getOutputStream(), + true/* autoFlush */); out.print("<pre>\n"); @@ -344,7 +353,8 @@ final boolean dumpTuples = false; - dump.dumpJournal(out, namespaces, dumpHistory, dumpPages, dumpIndices, dumpTuples); + dump.dumpJournal(out, namespaces, dumpHistory, dumpPages, + dumpIndices, dumpTuples); // flush PrintStream before resuming writes on Writer. out.flush(); @@ -355,6 +365,226 @@ } +// final boolean showQuorum = req.getParameter(SHOW_QUORUM) != null; + + if (getIndexManager() instanceof AbstractJournal + && ((AbstractJournal) getIndexManager()) + .isHighlyAvailable()) { + + /* + * Show the interesting things about the quorum. + * + * 1. QuorumState + * + * 2. Who is the leader, who is a follower. + * + * 3. What is the SPARQL end point for each leader and follower + * (where will this be published? HAGlue? HAJournalServer admin + * interface?) + * + * 4. dumpZoo (into pre element). + * + * 5. listServices (into pre element). + * + * TODO Simpler REST request to decide if this node is a leader, + * a follower, synchronizing, or rebuilding (these states should + * be reported through the admin api and/or by updating the + * Entry[] for the service; this last could be done by the + * quorum listener in the HAJournalServer). + */ + + final AbstractJournal journal = (AbstractJournal) getIndexManager(); + + final ZKQuorumImpl<HAGlue, QuorumService<HAGlue>> quorum = (ZKQuorumImpl<HAGlue, QuorumService<HAGlue>>) journal + .getQuorum(); + + // The current token. + final long quorumToken = quorum.token(); + + // The last valid token. + final long lastValidToken = quorum.lastValidToken(); + + final int njoined = quorum.getJoined().length; + + final QuorumService<HAGlue> quorumService = quorum.getClient(); + + current.node("h1", "High Availability"); + + // The quorum state. + { + + final XMLBuilder.Node p = current.node("p"); + + p.text("The quorum is " + + (quorum.isQuorumMet() ? "" : "not") + " met.") + .node("br").close(); + + p.text("" + njoined + " out of " + + quorum.replicationFactor() + + " services are joined.").node("br").close(); + + p.text("quorumToken=" + quorumToken + ", lastValidToken=" + + lastValidToken).node("br").close(); + + p.text("logicalServiceId=" + + quorumService.getLogicalServiceId()).node("br") + .close(); + + /* + * Report on the Service. + */ + { + final File serviceDir = quorumService.getServiceDir(); + p.text("ServiceDir: path=" + serviceDir).node("br") + .close(); + } + + /* + * Report on the Journal. + */ + { + final File file = journal.getFile(); + if (file != null) { + p.text("DataDir: path=" + file.getParent()) + .node("br").close(); + } + } + + /* + * Report #of files and bytes in the HALog directory. + */ + { + final File haLogDir = quorumService.getHALogDir(); + final File[] a = haLogDir + .listFiles(new FilenameFilter() { + @Override + public boolean accept(File dir, String name) { + return name + .endsWith(HALogWriter.HA_LOG_EXT); + } + }); + int nfiles = 0; + long nbytes = 0L; + for (File file : a) { + nbytes += file.length(); + nfiles++; + } + p.text("HALogDir: nfiles=" + nfiles + ", nbytes=" + + nbytes + ", path=" + haLogDir).node("br") + .close(); + } + p.close(); + + current.node("pre", quorum.toString()); + + } + + /* + * Display the NSS port, host, and leader/follower/not-joined + * status for each service in the quorum. 
+ */ + current.node("h2", "Quorum Services"); + { + final XMLBuilder.Node p = current.node("p"); + + final UUID[] joined = quorum.getJoined(); + + final UUID[] pipeline = quorum.getPipeline(); + + for (UUID serviceId : quorum.getMembers()) { + + final HAGlue remoteService; + try { + + remoteService = quorumService.getService(serviceId); + + } catch (RuntimeException ex) { + + /* + * Ignore. Might not be an HAGlue instance. + */ + + continue; + + } + + /* + * Note: This is not actually reporting the interface + * that the port is exposed to. + */ + + final String hostname = remoteService.getHostname(); + + final int nssPort = remoteService.getNSSPort(); + + final boolean isLeader = serviceId.equals(quorum + .getLeaderId()); + + final boolean isFollower = indexOf(serviceId, joined) > 0; + + final int pipelineIndex = indexOf(serviceId, pipeline); + + p.text(hostname + + " : nssPort=" + + nssPort + + " : " + + (isLeader ? "leader" + : (isFollower ? "follower" + : " is not joined")) + + ", pipelineOrder=" + + (pipelineIndex == -1 ? " is not in pipeline" + : pipelineIndex)).node("br").close(); + + } + + p.close(); + + } + + // DumpZookeeper + { + + current.node("h2", "Zookeeper"); + + // final XMLBuilder.Node section = current.node("pre"); + // flush writer before writing on PrintStream. + w.flush(); + + // dump onto the response. + final PrintWriter out = new PrintWriter( + resp.getOutputStream(), true/* autoFlush */); + + out.print("<pre>\n"); + + try { + + final DumpZookeeper dump = new DumpZookeeper( + quorum.getZookeeper()); + + dump.dump(out, true/* showDatatrue */, + quorumService.getLogicalServiceId()/* zpath */, + 0/* depth */); + + } catch (InterruptedException e) { + + e.printStackTrace(out); + + } catch (KeeperException e) { + + e.printStackTrace(out); + + } + + // flush PrintWriter before resuming writes on Writer. + out.flush(); + + // close section. + out.print("\n</pre>"); + + } + + } + current.node("br", "Accepted query count=" + getBigdataRDFContext().getQueryIdFactory().get()); @@ -814,4 +1044,34 @@ }); } + /** + * Return the index of the given {@link UUID} in the array of {@link UUID}s. + * + * @param x + * The {@link UUID} + * @param a + * The array of {@link UUID}s. + * + * @return The index of the {@link UUID} in the array -or- <code>-1</code> + * if the {@link UUID} does not appear in the array. + */ + static private int indexOf(final UUID x, final UUID[] a) { + + if (x == null) + throw new IllegalArgumentException(); + + for (int i = 0; i < a.length; i++) { + + if (x.equals(a[i])) { + + return i; + + } + + } + + return -1; + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2012-10-12 18:46:56
Revision: 6674 http://bigdata.svn.sourceforge.net/bigdata/?rev=6674&view=rev Author: thompsonbry Date: 2012-10-12 18:46:50 +0000 (Fri, 12 Oct 2012) Log Message: ----------- Modified the NSS to start immediately when the HAJournalServer starts. The NSS now submits a task that handles the default KB create. For HA, that task will block until the quorum meets, but the NSS is not blocked and will begin responding immediately. Of course, until the quorum meets, you can not do much but at least the home page is up. You CAN use the status page as well to examine the state of the quorum. The NSS will respond to SPARQL QUERY and UPDATE requests with "Quorum is not met" until the quorum meets, at which point that task will create the default KB. https://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CreateKBTask.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-12 14:56:00 UTC (rev 6673) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-12 18:46:50 UTC (rev 6674) @@ -4623,67 +4623,6 @@ * <p> * Access to this field is protected by the {@link #_fieldReadWriteLock} but * MUST also be coordinated as described above. - * - * FIXME HA: Many methods need to be modified to (a) await a quorum if there is - * none; and (b) handle a change in the quorum token. When a quorum breaks, - * these operations can block until a new quorum meets. As long as the - * master has not changed, the master's state remains valid and the other - * nodes can be brought into synchronization (they are synchronized if they - * are at the same message count for the write pipeline). If the master - * fails, then the new master will not have the same buffered write set and - * the outstanding operations (both unisolated and read/write tx) must about - * so we can resume from a known good commit point (this is true even if the - * secondaries remain active since (a) we do not have a strong guarantee - * that the new master is at the same message count on the write pipeline as - * the old master; and (b) transaction write sets are buffered within the - * JVM and/or disk on the master and are not available to the secondaries on - * failover. - * - * @see #read(long) - * @see #write(ByteBuffer) - * @see #write(ByteBuffer, long) - * @see #delete(long) - * @see #closeForWrites(long) However, some methods probably should be - * allowed to proceed without a quorum (or a node must be permitted to - * disconnect permanently from a quorum). E.g.: - * - * @see #close() - * @see #destroy() Another interesting question is whether we have to be in - * a quorum to create a new journal. I would say, "yes" for a data - * service. The atomic cutover to a new journal should only be taken by - * a quorum. 
- * <p> - * Probably you need to be in a quorum to create an HA journal (outside - * of a data service) as well. That would appear to be necessary in - * order to create the same initial root blocks on each node. In that - * case we have a bootstrapping problem for new HA journals. Either - * they must have a quorum before hand and go through a coordinated - * "commit" protocol for the journal create or they should be created - * first, then negotiate the quorum membership and who is the master - * and then resynchronize before the journal comes on line. - * - * @todo maintain readOnly flag based on whether or not the QuorumService - * was the leader the last time the quorum met and whether the token - * is still valid. use a lightweight test for a valid token to avoid - * lock contention. - * - * @todo readers should not block and await a quorum unless we are willing - * to handle the case where this QuorumService is not joined with the - * met quorum either by reading on the quorum or by throwing an - * exception back to the application. - * - * @todo In HA mode, the followers are read-only. However, that is a - * high-level constraint on application writes. For synchronization, - * we need to be able to write on the journal even when it is not - * joined with the quorum. The WriteCacheService will refuse to relay - * writes if the service is not the leader of a met quorum. - * <p> - * For example, it would be appropriate to impose the constraint on - * which nodes can be read or written at a load balancer at the SAIL - * layer. - * - * if (quorum.isHighlyAvailable() && quorum.isQuorumMet() && - * quorum.getClient().isFollower(quorumToken)) { return true; } */ private volatile long quorumToken = Quorum.NO_QUORUM; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-12 14:56:00 UTC (rev 6673) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-12 18:46:50 UTC (rev 6674) @@ -64,7 +64,6 @@ import com.bigdata.journal.Journal; import com.bigdata.journal.ValidationError; import com.bigdata.journal.WORMStrategy; -import com.bigdata.journal.jini.ha.HAJournalServer.NSSConfigurationOptions; import com.bigdata.quorum.Quorum; import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rwstore.RWStore; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-12 14:56:00 UTC (rev 6673) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-12 18:46:50 UTC (rev 6674) @@ -505,6 +505,21 @@ } /* + * The NSS will start on each service in the quorum. However, + * only the leader will create the default KB (if that option is + * configured). + */ + try { + + startNSS(); + + } catch (Exception e1) { + + log.error("Could not start NanoSparqlServer: " + e1, e1); + + } + + /* * Wait until the server is terminated. */ @@ -754,31 +769,6 @@ } - if (isJoinedMember(token) && server.jettyServer == null) { - /* - * The NSS will start on each service in the quorum. 
However, - * only the leader will create the default KB (if that option is - * configured). - * - * Submit task since we can not do this in the event thread. - */ - journal.getExecutorService().submit(new Callable<Void>() { - @Override - public Void call() throws Exception { - if (server.jettyServer == null) { - try { - server.startNSS(); - } catch (Exception e1) { - log.error("Could not start NanoSparqlServer: " - + e1, e1); - } - } - return null; - } - }); - - } - } @Override @@ -1695,6 +1685,27 @@ } } + +// /** +// * Conditionally create the default KB instance. +// * +// * @see NSSConfigurationOptions +// * +// * @throws Exception +// */ +// private void conditionalCreateDefaultKB() throws Exception { +// +// final String COMPONENT = NSSConfigurationOptions.COMPONENT; +// +// final String namespace = (String) config.getEntry(COMPONENT, +// NSSConfigurationOptions.NAMESPACE, String.class, +// NSSConfigurationOptions.DEFAULT_NAMESPACE); +// +// final boolean create = (Boolean) config.getEntry(COMPONENT, +// NSSConfigurationOptions.CREATE, Boolean.TYPE, +// NSSConfigurationOptions.DEFAULT_CREATE); +// +// } /** * Setup and start the {@link NanoSparqlServer}. @@ -1734,9 +1745,16 @@ initParams.put(ConfigParams.QUERY_THREAD_POOL_SIZE, queryPoolThreadSize.toString()); + // Note: Create will be handled by the QuorumListener (above). initParams.put(ConfigParams.CREATE, Boolean.toString(create)); } + + if (jettyServer != null && jettyServer.isRunning()) { + + throw new RuntimeException("Already running"); + + } // Setup the embedded jetty server for NSS webapp. jettyServer = NanoSparqlServer.newInstance(port, journal, initParams); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java 2012-10-12 14:56:00 UTC (rev 6673) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java 2012-10-12 18:46:50 UTC (rev 6674) @@ -48,24 +48,18 @@ import com.bigdata.counters.CounterSet; import com.bigdata.counters.ICounterSetAccess; import com.bigdata.counters.IProcessCounters; -import com.bigdata.ha.HAGlue; -import com.bigdata.ha.QuorumService; import com.bigdata.io.DirectBufferPool; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.ITransactionService; import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; -import com.bigdata.quorum.AsynchronousQuorumCloseException; -import com.bigdata.quorum.Quorum; import com.bigdata.rdf.ServiceProviderHook; import com.bigdata.rdf.sail.BigdataSail; -import com.bigdata.rdf.store.ScaleOutTripleStore; import com.bigdata.service.AbstractDistributedFederation; import com.bigdata.service.DefaultClientDelegate; import com.bigdata.service.IBigdataClient; import com.bigdata.service.IBigdataFederation; import com.bigdata.service.jini.JiniClient; -import com.bigdata.service.jini.JiniFederation; import com.bigdata.util.httpd.AbstractHTTPD; /** @@ -78,7 +72,7 @@ public class BigdataRDFServletContextListener implements ServletContextListener { - static private final transient Logger log = Logger + private static final transient Logger log = Logger .getLogger(BigdataRDFServletContextListener.class); private Journal jnl = null; @@ -179,93 +173,14 @@ if (create) { - if (indexManager instanceof Journal) { + /* + * Note: Nobody is 
watching this future. The task will log any + * errors. + */ - /* - * Create a local triple store. - * - * Note: This hands over the logic to some custom code located - * on the BigdataSail. - */ + indexManager.getExecutorService().submit( + new CreateKBTask(indexManager, namespace)); - final Journal jnl = (Journal) indexManager; - - final Quorum<HAGlue, QuorumService<HAGlue>> quorum = jnl - .getQuorum(); - - boolean isSoloOrLeader; - if (quorum == null) { - - isSoloOrLeader = true; - - } else { - - final long token; - try { - log.warn("Awaiting quorum."); - token = quorum.awaitQuorum(); - } catch (AsynchronousQuorumCloseException e1) { - throw new RuntimeException(e1); - } catch (InterruptedException e1) { - throw new RuntimeException(e1); - } - - if (quorum.getMember().isLeader(token)) { - isSoloOrLeader = true; - } else { - isSoloOrLeader = false; - } - } - - if (isSoloOrLeader) { - - // Attempt to resolve the namespace. - if (indexManager.getResourceLocator().locate(namespace, - ITx.UNISOLATED) == null) { - - log.warn("Creating KB instance: namespace=" + namespace); - - final Properties properties = new Properties( - jnl.getProperties()); - - // override the namespace. - properties.setProperty(BigdataSail.Options.NAMESPACE, - namespace); - - // create the appropriate as configured triple/quad - // store. - BigdataSail.createLTS(jnl, properties); - - } // if( tripleStore == null ) - - } - - } else { - - // Attempt to resolve the namespace. - if (indexManager.getResourceLocator().locate(namespace, - ITx.UNISOLATED) == null) { - - /* - * Register triple store for scale-out. - */ - - log.warn("Creating KB instance: namespace=" + namespace); - - final JiniFederation<?> fed = (JiniFederation<?>) indexManager; - - final Properties properties = fed.getClient() - .getProperties(); - - final ScaleOutTripleStore lts = new ScaleOutTripleStore( - indexManager, namespace, ITx.UNISOLATED, properties); - - lts.create(); - - } // if( tripleStore == null ) - - } - } // if( create ) txs = (indexManager instanceof Journal ? ((Journal) indexManager) Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CreateKBTask.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CreateKBTask.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CreateKBTask.java 2012-10-12 18:46:50 UTC (rev 6674) @@ -0,0 +1,151 @@ +package com.bigdata.rdf.sail.webapp; + +import java.util.Properties; +import java.util.concurrent.Callable; + +import org.apache.log4j.Logger; + +import com.bigdata.ha.HAGlue; +import com.bigdata.ha.QuorumService; +import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.ITx; +import com.bigdata.journal.Journal; +import com.bigdata.quorum.AsynchronousQuorumCloseException; +import com.bigdata.quorum.Quorum; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.store.ScaleOutTripleStore; +import com.bigdata.service.jini.JiniFederation; + +/** + * Task creates a KB for the given namespace iff no such KB exists. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class CreateKBTask implements Callable<Void> { + + private static final transient Logger log = Logger + .getLogger(BigdataRDFServletContextListener.class); + + private final IIndexManager indexManager; + private final String namespace; + + public CreateKBTask(final IIndexManager indexManager, + final String namespace) { + + this.indexManager = indexManager; + this.namespace = namespace; + + } + + public Void call() throws Exception { + + try { + + doRun(); + + } catch (Throwable t) { + + log.error(t, t); + + throw new Exception(t); + + } + + return null; + + } + + private void doRun() { + + if (indexManager instanceof AbstractJournal) { + + /* + * Create a local triple store. + * + * Note: This hands over the logic to some custom code located + * on the BigdataSail. + */ + + final Journal jnl = (Journal) indexManager; + + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = jnl + .getQuorum(); + + boolean isSoloOrLeader; + + if (quorum == null) { + + isSoloOrLeader = true; + + } else { + + final long token; + try { + log.warn("Awaiting quorum."); + token = quorum.awaitQuorum(); + } catch (AsynchronousQuorumCloseException e1) { + throw new RuntimeException(e1); + } catch (InterruptedException e1) { + throw new RuntimeException(e1); + } + + if (quorum.getMember().isLeader(token)) { + isSoloOrLeader = true; + } else { + isSoloOrLeader = false; + } + } + + if (isSoloOrLeader) { + + // Attempt to resolve the namespace. + if (indexManager.getResourceLocator().locate(namespace, + ITx.UNISOLATED) == null) { + + log.warn("Creating KB instance: namespace=" + namespace); + + final Properties properties = new Properties( + jnl.getProperties()); + + // override the namespace. + properties.setProperty(BigdataSail.Options.NAMESPACE, + namespace); + + // create the appropriate as configured triple/quad + // store. + BigdataSail.createLTS(jnl, properties); + + } // if( tripleStore == null ) + + } + + } else { + + // Attempt to resolve the namespace. + if (indexManager.getResourceLocator().locate(namespace, + ITx.UNISOLATED) == null) { + + /* + * Register triple store for scale-out. + */ + + log.warn("Creating KB instance: namespace=" + namespace); + + final JiniFederation<?> fed = (JiniFederation<?>) indexManager; + + final Properties properties = fed.getClient() + .getProperties(); + + final ScaleOutTripleStore lts = new ScaleOutTripleStore( + indexManager, namespace, ITx.UNISOLATED, properties); + + lts.create(); + + } // if( tripleStore == null ) + + } + + } + +} \ No newline at end of file This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
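[Editorial addendum] The listener change in the revision above reduces to a fire-and-forget submit. Below is a minimal sketch, not part of the commit (DeferredKBCreate is a hypothetical wrapper class), of that pattern: hand CreateKBTask to the index manager's executor and return immediately, so the NSS can serve its status page while the task awaits the quorum and, on the leader, creates the default KB.

import java.util.concurrent.Future;

import com.bigdata.journal.IIndexManager;
import com.bigdata.rdf.sail.webapp.CreateKBTask;

public class DeferredKBCreate {

    /**
     * Submit the KB-create task without blocking the caller. Nobody has to
     * watch the returned Future; CreateKBTask logs its own errors.
     */
    public static Future<Void> submit(final IIndexManager indexManager,
            final String namespace) {

        return indexManager.getExecutorService().submit(
                new CreateKBTask(indexManager, namespace));

    }

}

A caller that does want to block until the default KB exists can simply call get() on the returned Future.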
From: <mar...@us...> - 2012-10-18 12:55:33
Revision: 6676 http://bigdata.svn.sourceforge.net/bigdata/?rev=6676&view=rev Author: martyncutcher Date: 2012-10-18 12:55:22 +0000 (Thu, 18 Oct 2012) Log Message: ----------- Provide IHALogReader for HALogWriter to allow readers concurretn with Writer. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogReader.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogWriter.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/IHALogReader.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/ha/halog/ Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogReader.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogReader.java 2012-10-12 21:01:58 UTC (rev 6675) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogReader.java 2012-10-18 12:55:22 UTC (rev 6676) @@ -16,11 +16,13 @@ import com.bigdata.io.FileChannelUtility; import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; +import com.bigdata.journal.IBufferStrategy; import com.bigdata.journal.IHABufferStrategy; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.RootBlockUtility; import com.bigdata.journal.StoreTypeEnum; import com.bigdata.journal.WORMStrategy; +import com.bigdata.rawstore.IAddressManager; import com.bigdata.util.ChecksumError; import com.bigdata.util.ChecksumUtility; @@ -39,423 +41,459 @@ * * @author Martyn Cutcher */ -public class HALogReader { - - private static final Logger log = Logger.getLogger(HALogReader.class); - - private final File m_file; - private final RandomAccessFile m_raf; +public class HALogReader implements IHALogReader { + + private static final Logger log = Logger.getLogger(HALogReader.class); + + private final File m_file; + private final RandomAccessFile m_raf; private final FileChannel m_channel; private final IRootBlockView m_openRootBlock; private final IRootBlockView m_closeRootBlock; private final StoreTypeEnum m_storeType; private final int magic; private final int version; - + public HALogReader(final File file) throws IOException { - m_file = file; - + m_file = file; + m_raf = new RandomAccessFile(file, "r"); m_channel = m_raf.getChannel(); - try { - /** - * Must determine whether the file has consistent open and committed - * rootBlocks, using the commitCounter to determine which rootBlock - * is which. - * - * Note: Both root block should exist (they are both written on - * startup). If they are identical, then the log is empty (the - * closing root block has not been written and the data in the log - * is useless). - * - * We figure out which root block is the opening root block based on - * standard logic. - */ - /* - * Read the MAGIC and VERSION. - */ - m_raf.seek(0L); - try { - /* - * Note: this next line will throw IOException if there is a - * file lock contention. - */ - magic = m_raf.readInt(); - } catch (IOException ex) { - throw new RuntimeException( - "Can not read magic. 
Is file locked by another process?", - ex); - } - if (magic != HALogWriter.MAGIC) - throw new RuntimeException("Bad journal magic: expected=" - + HALogWriter.MAGIC + ", actual=" + magic); - version = m_raf.readInt(); - if (version != HALogWriter.VERSION1) - throw new RuntimeException("Bad journal version: expected=" - + HALogWriter.VERSION1 + ", actual=" + version); + try { + /** + * Must determine whether the file has consistent open and committed + * rootBlocks, using the commitCounter to determine which rootBlock + * is which. + * + * Note: Both root block should exist (they are both written on + * startup). If they are identical, then the log is empty (the + * closing root block has not been written and the data in the log + * is useless). + * + * We figure out which root block is the opening root block based on + * standard logic. + */ + /* + * Read the MAGIC and VERSION. + */ + m_raf.seek(0L); + try { + /* + * Note: this next line will throw IOException if there is a + * file lock contention. + */ + magic = m_raf.readInt(); + } catch (IOException ex) { + throw new RuntimeException( + "Can not read magic. Is file locked by another process?", + ex); + } + if (magic != HALogWriter.MAGIC) + throw new RuntimeException("Bad journal magic: expected=" + + HALogWriter.MAGIC + ", actual=" + magic); + version = m_raf.readInt(); + if (version != HALogWriter.VERSION1) + throw new RuntimeException("Bad journal version: expected=" + + HALogWriter.VERSION1 + ", actual=" + version); - final RootBlockUtility tmp = new RootBlockUtility(reopener, file, - true/* validateChecksum */, false/* alternateRootBlock */, - false/* ignoreBadRootBlock */); + final RootBlockUtility tmp = new RootBlockUtility(reopener, file, + true/* validateChecksum */, false/* alternateRootBlock */, + false/* ignoreBadRootBlock */); - m_closeRootBlock = tmp.chooseRootBlock(); + m_closeRootBlock = tmp.chooseRootBlock(); - m_openRootBlock = tmp.rootBlock0 == m_closeRootBlock ? tmp.rootBlock1 - : tmp.rootBlock0; + m_openRootBlock = tmp.rootBlock0 == m_closeRootBlock ? tmp.rootBlock1 + : tmp.rootBlock0; - final long cc0 = m_openRootBlock.getCommitCounter(); + final long cc0 = m_openRootBlock.getCommitCounter(); - final long cc1 = m_closeRootBlock.getCommitCounter(); + final long cc1 = m_closeRootBlock.getCommitCounter(); - if ((cc0 + 1) != cc1 && (cc0 != cc1)) { - /* - * Counters are inconsistent with either an empty log file or a - * single transaction scope. - */ - throw new IllegalStateException("Incompatible rootblocks: cc0=" - + cc0 + ", cc1=" + cc1); - } + if ((cc0 + 1) != cc1 && (cc0 != cc1)) { + /* + * Counters are inconsistent with either an empty log file or a + * single transaction scope. 
+ */ + throw new IllegalStateException("Incompatible rootblocks: cc0=" + + cc0 + ", cc1=" + cc1); + } - m_channel.position(HALogWriter.headerSize0); - - m_storeType = m_openRootBlock.getStoreType(); + m_channel.position(HALogWriter.headerSize0); - } catch (Throwable t) { - - close(); - - throw new RuntimeException(t); - - } - + m_storeType = m_openRootBlock.getStoreType(); + + } catch (Throwable t) { + + close(); + + throw new RuntimeException(t); + + } + } - - /** - * Hook for {@link FileChannelUtility#readAll(FileChannel, ByteBuffer, long)} - */ - private final IReopenChannel<FileChannel> reopener = new IReopenChannel<FileChannel>() { - @Override - public FileChannel reopenChannel() throws IOException { + /** + * Hook for + * {@link FileChannelUtility#readAll(FileChannel, ByteBuffer, long)} + */ + private final IReopenChannel<FileChannel> reopener = new IReopenChannel<FileChannel>() { - if (m_channel == null) - throw new IOException("Closed"); + @Override + public FileChannel reopenChannel() throws IOException { - return m_channel; + if (m_channel == null) + throw new IOException("Closed"); - } - }; + return m_channel; - public void close() { + } + }; - if (m_channel.isOpen()) { + public void close() { - try { - m_raf.close(); - } catch (IOException e) { - log.error("Problem closing file: file=" + m_file + " : " + e, e); - } - - } + if (m_channel.isOpen()) { - } + try { + m_raf.close(); + } catch (IOException e) { + log + .error("Problem closing file: file=" + m_file + " : " + + e, e); + } - /** - * Return <code>true</code> if the root blocks in the log file have the same - * commit counter. Such log files are logically empty regardless of their - * length. - */ - public boolean isEmpty() { + } - return m_openRootBlock.getCommitCounter()==m_closeRootBlock.getCommitCounter(); - - } - - private void assertOpen() throws IOException { + } - if (!m_channel.isOpen()) - throw new IOException("Closed: " + m_file); + /** + * Return <code>true</code> if the root blocks in the log file have the same + * commit counter. Such log files are logically empty regardless of their + * length. + */ + public boolean isEmpty() { - } + return m_openRootBlock.getCommitCounter() == m_closeRootBlock + .getCommitCounter(); - /** - * The {@link IRootBlockView} for the committed state BEFORE the write set - * contained in the HA log file. - */ + } + + private void assertOpen() throws IOException { + + if (!m_channel.isOpen()) + throw new IOException("Closed: " + m_file); + + } + + /** + * The {@link IRootBlockView} for the committed state BEFORE the write set + * contained in the HA log file. + */ public IRootBlockView getOpeningRootBlock() { - return m_openRootBlock; - + return m_openRootBlock; + } /** - * The {@link IRootBlockView} for the committed state AFTER the write - * set contained in the HA log file has been applied. + * The {@link IRootBlockView} for the committed state AFTER the write set + * contained in the HA log file has been applied. */ public IRootBlockView getClosingRootBlock() { - - return m_closeRootBlock; - + + return m_closeRootBlock; + } - /** - * Checks whether we have reached the end of the file. - */ + /** + * Checks whether we have reached the end of the file. + */ public boolean hasMoreBuffers() throws IOException { - assertOpen(); + assertOpen(); - if(isEmpty()) { - - /* - * Ignore the file length if it is logically empty. - */ - - return false; - - } - - return m_channel.position() < m_channel.size(); - + if (isEmpty()) { + + /* + * Ignore the file length if it is logically empty. 
+ */ + + return false; + + } + + return m_channel.position() < m_channel.size(); + } - + /** * To stream from the Channel, we can use the associated RandomAccessFile * since the FilePointer for one is the same as the other. */ - private class RAFInputStream extends InputStream { - + private static class RAFInputStream extends InputStream { + final RandomAccessFile m_raf; + + RAFInputStream(final RandomAccessFile raf) { + m_raf = raf; + } + @Override public int read() throws IOException { return m_raf.read(); } - + @Override - public int read(byte[] b, int off, int len) throws IOException { + public int read(byte[] b, int off, int len) throws IOException { return m_raf.read(b, off, len); } - + } - /** - * Attempts to read the next {@link IHAWriteMessage} and then the expected - * buffer, that is read into the client buffer. The {@link IHAWriteMessage} - * is returned to the caller. - * <p> - * Note: The caller's buffer will be filled in IFF the data is on the HALog. - * For some {@link IHABufferStrategy} implementations, that data is not - * present in the HALog. The caller's buffer will not be modified and the - * caller is responsible for getting the data from the - * {@link IHABufferStrategy} (e.g., for the {@link WORMStrategy}). - * <p> - * Note: IF the buffer is filled, then the limit will be the #of bytes ready - * to be transmitted and the position will be zero. - * - * @param clientBuffer - * A buffer from the {@link DirectBufferPool#INSTANCE}. - */ - public IHAWriteMessage processNextBuffer(final ByteBuffer clientBuffer) - throws IOException { + /** + * Attempts to read the next {@link IHAWriteMessage} and then the expected + * buffer, that is read into the client buffer. The {@link IHAWriteMessage} + * is returned to the caller. + * <p> + * Note: The caller's buffer will be filled in IFF the data is on the HALog. + * For some {@link IHABufferStrategy} implementations, that data is not + * present in the HALog. The caller's buffer will not be modified and the + * caller is responsible for getting the data from the + * {@link IHABufferStrategy} (e.g., for the {@link WORMStrategy}). + * <p> + * Note: IF the buffer is filled, then the limit will be the #of bytes ready + * to be transmitted and the position will be zero. + * + * @param clientBuffer + * A buffer from the {@link DirectBufferPool#INSTANCE}. 
+ */ + public IHAWriteMessage processNextBuffer(final ByteBuffer clientBuffer) + throws IOException { - final ObjectInputStream objinstr = new ObjectInputStream( - new RAFInputStream()); + return processNextBuffer(m_raf, reopener, m_storeType, clientBuffer); - final IHAWriteMessage msg; - try { + } - msg = (IHAWriteMessage) objinstr.readObject(); + static public IHAWriteMessage processNextBuffer(final RandomAccessFile raf, final IReopenChannel<FileChannel> reopener, + final StoreTypeEnum storeType, final ByteBuffer clientBuffer) + throws IOException { - } catch (ClassNotFoundException e) { + final FileChannel channel = raf.getChannel(); + + final ObjectInputStream objinstr = new ObjectInputStream( + new RAFInputStream(raf)); - throw new IllegalStateException(e); + final IHAWriteMessage msg; + try { - } + msg = (IHAWriteMessage) objinstr.readObject(); - switch (m_storeType) { - case RW: { + } catch (ClassNotFoundException e) { - if (msg.getSize() > clientBuffer.capacity()) { + throw new IllegalStateException(e); - throw new IllegalStateException( - "Client buffer is not large enough for logged buffer"); + } - } + switch (storeType) { + case RW: { - // Now setup client buffer to receive from the channel - final int nbytes = msg.getSize(); - clientBuffer.position(0); - clientBuffer.limit(nbytes); + if (msg.getSize() > clientBuffer.capacity()) { - // Current position on channel. - final long pos = m_channel.position(); + throw new IllegalStateException( + "Client buffer is not large enough for logged buffer"); - // Robustly read of write cache block at that position into the - // caller's buffer. (pos=limit=nbytes) - FileChannelUtility.readAll(reopener, clientBuffer, pos); + } - // Advance the file channel beyond the block we just read. - m_channel.position(pos + msg.getSize()); - - // limit=pos; pos=0; - clientBuffer.flip(); // ready for reading + // Now setup client buffer to receive from the channel + final int nbytes = msg.getSize(); + clientBuffer.position(0); + clientBuffer.limit(nbytes); - final int chksum = new ChecksumUtility().checksum(clientBuffer - .duplicate()); + // Current position on channel. + final long pos = channel.position(); - if (chksum != msg.getChk()) - throw new ChecksumError("Expected=" + msg.getChk() - + ", actual=" + chksum); - - if (clientBuffer.remaining() != nbytes) - throw new AssertionError(); + // allow null clientBuffer for IHAWriteMessage only + if (clientBuffer != null) { + // Robustly read of write cache block at that position into the + // caller's buffer. (pos=limit=nbytes) + FileChannelUtility.readAll(reopener, clientBuffer, pos); - break; - } - case WORM: { - /* - * Note: The WriteCache block needs to be recovered from the - * WORMStrategy by the caller. - */ - break; - } - default: - throw new UnsupportedOperationException(); - } + // limit=pos; pos=0; + clientBuffer.flip(); // ready for reading - return msg; - + final int chksum = new ChecksumUtility().checksum(clientBuffer + .duplicate()); + + if (chksum != msg.getChk()) + throw new ChecksumError("Expected=" + msg.getChk() + + ", actual=" + chksum); + + if (clientBuffer.remaining() != nbytes) + throw new AssertionError(); + } + // Advance the file channel beyond the block we just read. + channel.position(pos + msg.getSize()); + + break; + } + case WORM: { + if (clientBuffer != null) { + throw new IllegalArgumentException( + "No buffer content is available of the strategy is WORM"); + } + /* + * Note: The WriteCache block needs to be recovered from the + * WORMStrategy by the caller. 
+ */ + + // final int nbytes = msg.getSize(); + // clientBuffer.position(0); + // clientBuffer.limit(nbytes); + // + // final long address = m_addressManager.toAddr(nbytes, msg + // .getFirstOffset()); + // final ByteBuffer src = m_bufferStrategy.read(address); + // + // clientBuffer.put(src); + // } + break; + } + default: + throw new UnsupportedOperationException(); + } + + return msg; + } - - - /** - * Utility program will dump log files (or directories containing log files) - * provided as arguments. - * - * @param args - * Zero or more files or directories. - * - * @throws IOException - * @throws InterruptedException - */ - public static void main(final String[] args) throws IOException, - InterruptedException { - final IBufferAccess buf = DirectBufferPool.INSTANCE.acquire(); + /** + * Utility program will dump log files (or directories containing log files) + * provided as arguments. + * + * @param args + * Zero or more files or directories. + * + * @throws IOException + * @throws InterruptedException + */ + public static void main(final String[] args) throws IOException, + InterruptedException { - try { + final IBufferAccess buf = DirectBufferPool.INSTANCE.acquire(); - for (String arg : args) { + try { - final File file = new File(arg); + for (String arg : args) { - if (!file.exists()) { + final File file = new File(arg); - System.err.println("No such file: " + file); + if (!file.exists()) { - continue; + System.err.println("No such file: " + file); - } + continue; - if (file.isDirectory()) { + } - doDirectory(file, buf); - - } else { + if (file.isDirectory()) { - doFile(file, buf); - - } - - } + doDirectory(file, buf); - } finally { + } else { - buf.release(); + doFile(file, buf); - } + } - } + } - private static void doDirectory(final File dir, final IBufferAccess buf) - throws IOException { - - final File[] files = dir.listFiles(new FilenameFilter() { - - @Override - public boolean accept(File dir, String name) { + } finally { - if (new File(dir, name).isDirectory()) { - - // Allow recursion through directories. - return true; - - } - - return name.endsWith(HALogWriter.HA_LOG_EXT); - - } - }); + buf.release(); - for (File file : files) { + } - if(file.isDirectory()) { - - doDirectory(file, buf); - - } else { - - doFile(file, buf); - - } + } - } - - } - - private static void doFile(final File file, final IBufferAccess buf) - throws IOException { + private static void doDirectory(final File dir, final IBufferAccess buf) + throws IOException { - final HALogReader r = new HALogReader(file); + final File[] files = dir.listFiles(new FilenameFilter() { - try { + @Override + public boolean accept(File dir, String name) { - final IRootBlockView openingRootBlock = r - .getOpeningRootBlock(); + if (new File(dir, name).isDirectory()) { - final IRootBlockView closingRootBlock = r - .getClosingRootBlock(); + // Allow recursion through directories. 
+ return true; - if (openingRootBlock.getCommitCounter() == closingRootBlock - .getCommitCounter()) { + } - System.err.println("EMPTY LOG: " + file); + return name.endsWith(HALogWriter.HA_LOG_EXT); - } + } + }); - System.out.println("----------begin----------"); - System.out.println("file=" + file); - System.out.println("openingRootBlock=" + openingRootBlock); - System.out.println("closingRootBlock=" + closingRootBlock); + for (File file : files) { - while (r.hasMoreBuffers()) { + if (file.isDirectory()) { - final IHAWriteMessage msg = r.processNextBuffer(buf - .buffer()); + doDirectory(file, buf); - System.out.println(msg.toString()); + } else { - } - System.out.println("-----------end-----------"); + doFile(file, buf); - } finally { + } - r.close(); + } - } + } - } + private static void doFile(final File file, final IBufferAccess buf) + throws IOException { + final HALogReader r = new HALogReader(file); + + try { + + final IRootBlockView openingRootBlock = r.getOpeningRootBlock(); + + final IRootBlockView closingRootBlock = r.getClosingRootBlock(); + + final boolean isWORM = openingRootBlock.getStoreType() == StoreTypeEnum.WORM; + + if (openingRootBlock.getCommitCounter() == closingRootBlock + .getCommitCounter()) { + + System.err.println("EMPTY LOG: " + file); + + } + + System.out.println("----------begin----------"); + System.out.println("file=" + file); + System.out.println("openingRootBlock=" + openingRootBlock); + System.out.println("closingRootBlock=" + closingRootBlock); + + while (r.hasMoreBuffers()) { + + // don't pass buffer in if WORM, just validate the messages + final IHAWriteMessage msg = r.processNextBuffer(isWORM ? null + : buf.buffer()); + + System.out.println(msg.toString()); + + } + System.out.println("-----------end-----------"); + + } finally { + + r.close(); + + } + + } + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogWriter.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogWriter.java 2012-10-12 21:01:58 UTC (rev 6675) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogWriter.java 2012-10-18 12:55:22 UTC (rev 6676) @@ -7,6 +7,8 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Formatter; +import java.util.concurrent.Semaphore; +import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.log4j.Logger; @@ -15,7 +17,9 @@ import com.bigdata.io.IReopenChannel; import com.bigdata.io.SerializerUtil; import com.bigdata.journal.IRootBlockView; +import com.bigdata.journal.RootBlockUtility; import com.bigdata.journal.RootBlockView; +import com.bigdata.journal.StoreTypeEnum; import com.bigdata.rawstore.Bytes; /** @@ -83,18 +87,20 @@ /** Current write cache block sequence counter. */ private long m_nextSequence = 0; + + /** current log file. */ + private FileState m_state = null; /** current log file. */ private File m_log = null; public static final String HA_LOG_EXT = ".ha-log"; - /** current output file channel. */ - private RandomAccessFile m_raf = null; - private FileChannel m_channel = null; - /** current write point on the channel. 
*/ private long m_position = headerSize0; + + /** number of open readers **/ + private int m_readers = 0; /** * Return the commit counter that is expected for the writes that will be @@ -121,7 +127,7 @@ private void assertOpen() { - if (m_raf == null) + if (m_state == null) throw new IllegalStateException(); } @@ -131,7 +137,7 @@ */ public File getFile() { - return m_log; + return m_state.m_log; } @@ -174,7 +180,7 @@ final long seq = m_nextSequence; - return getClass().getName() + "{" + m_raf == null ? "closed" + return getClass().getName() + "{" + m_state == null ? "closed" : "commitCounter=" + tmp.getCommitCounter() + ",nextSequence=" + seq + "}"; @@ -226,10 +232,10 @@ final String logFile = getHALogFileName(commitCounter + 1); - m_log = new File(m_dir, logFile); + final File log = new File(m_dir, logFile); // Must delete file if it exists. - if (m_log.exists() && !m_log.delete()) { + if (log.exists() && !log.delete()) { /* * It is a problem if a file exists and we can not delete it. We @@ -241,16 +247,14 @@ } - m_raf = new RandomAccessFile(m_log, "rw"); - - m_channel = m_raf.getChannel(); - + m_state = new FileState(log, rootBlock.getStoreType()); + /* * Write the MAGIC and version on the file. */ - m_raf.seek(0); - m_raf.writeInt(MAGIC); - m_raf.writeInt(VERSION1); + m_state.m_raf.seek(0); + m_state.m_raf.writeInt(MAGIC); + m_state.m_raf.writeInt(VERSION1); /* * Write root block to slots 0 and 1. @@ -276,10 +280,10 @@ @Override public FileChannel reopenChannel() throws IOException { - if (m_channel == null) + if (m_state.m_channel == null) throw new IOException("Closed"); - return m_channel; + return m_state.m_channel; } }; @@ -346,6 +350,8 @@ // // The closing root block is always in slot 1. // writeRootBlock(false/* isRootBlock0 */, rootBlock); + m_state.committed(); + close(); } @@ -442,7 +448,9 @@ default: throw new AssertionError(); } - + + // let any readers know a new record is ready + m_state.addRecord(); } /** @@ -464,8 +472,8 @@ */ private void close() throws IOException { try { - if (m_channel != null) { - m_channel.close(); + if (m_state != null) { + m_state.close(); } } finally { reset(); @@ -477,12 +485,8 @@ */ private void reset() { - m_log = null; + m_state = null; - m_raf = null; - - m_channel = null; - m_position = headerSize0; m_rootBlock = null; @@ -496,9 +500,9 @@ */ private void flush() throws IOException { - if (m_channel != null) { + if (m_state != null) { - m_channel.force(true); + m_state.m_channel.force(true); } @@ -513,14 +517,14 @@ try { - if (m_channel != null) { + if (m_state != null) { /* * Conditional remove iff file is open. Will not remove * something that has been closed. */ - m_channel.close(); + m_state.m_channel.close(); if (m_log.exists() && !m_log.delete()) { @@ -561,4 +565,170 @@ } + public IHALogReader getReader() { + + if (m_state == null) + return null; + + return new OpenHALogReader(m_state); + } + + /** + * The FileState class encapsulates the file objects shared + * by the Writer and Readers. 
+ */ + static class FileState { + final StoreTypeEnum m_storeType; + final File m_log; + final FileChannel m_channel; + final RandomAccessFile m_raf; + final Semaphore m_entries = new Semaphore(0); + int m_records = 0; + boolean m_committed = false; + + final IReopenChannel<FileChannel> reopener = new IReopenChannel<FileChannel>() { + + @Override + public FileChannel reopenChannel() throws IOException { + + if (m_channel == null) + throw new IOException("Closed"); + + return m_channel; + + } + }; + + int m_accessors = 0; + + FileState(final File file, StoreTypeEnum storeType) throws FileNotFoundException { + m_log = file; + m_storeType = storeType; + m_raf = new RandomAccessFile(m_log, "rw"); + m_channel = m_raf.getChannel(); + m_accessors = 1; // the writer is a reader also + } + + public void close() throws IOException { + if (--m_accessors == 0) + m_channel.close(); + } + + public void addRecord() { + synchronized(this) { + m_records++; + this.notifyAll(); + } + } + + public int recordCount() { + synchronized(this) { + return m_records; + } + } + + public void committed() { + synchronized(this) { + m_committed = true; + this.notifyAll(); + } + } + + public boolean isCommitted() { + synchronized(this) { + return m_committed; + } + } + + public boolean isEmpty() { + return m_committed && m_records == 0; + } + + /** + * + * @param record - the next sequence required + */ + public void waitOnStateChange(final int record) { + synchronized (this) { + if (m_records >= record) { + return; + } + + try { + wait(); + } catch (InterruptedException e) { + // okay; + } + } + + } + + } + + static class OpenHALogReader implements IHALogReader { + final FileState m_state; + int m_record = 0; + long m_position = headerSize0; // initial position + + OpenHALogReader(FileState state) { + m_state = state; + m_state.m_accessors++; + } + + @Override + public IRootBlockView getClosingRootBlock() throws IOException { + final RootBlockUtility tmp = new RootBlockUtility(m_state.reopener, m_state.m_log, + true/* validateChecksum */, false/* alternateRootBlock */, + false/* ignoreBadRootBlock */); + + return tmp.chooseRootBlock(); + } + + @Override + public boolean hasMoreBuffers() throws IOException { + if (m_state.isCommitted() && m_state.recordCount() <= m_record) + return false; + + if (m_state.recordCount() > m_record) + return true; + + m_state.waitOnStateChange(m_record+1); + + return hasMoreBuffers(); + } + + @Override + public boolean isEmpty() { + return m_state.isEmpty(); + } + + @Override + public IHAWriteMessage processNextBuffer(ByteBuffer clientBuffer) + throws IOException { + + final IHAWriteMessage msg; + + synchronized (m_state) { + final long savePosition = m_state.m_channel.position(); + m_state.m_channel.position(m_position); + + msg = HALogReader.processNextBuffer(m_state.m_raf, m_state.reopener, m_state.m_storeType, clientBuffer); + + m_position = m_state.m_channel.position(); + m_state.m_channel.position(savePosition); + } + + m_record++; + + return msg; + } + + @Override + public void close() throws IOException { + if (m_state != null) { + m_state.close(); + } + } + + } } Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/IHALogReader.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/IHALogReader.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/IHALogReader.java 2012-10-18 12:55:22 UTC (rev 6676) @@ -0,0 +1,21 @@ +package com.bigdata.ha; + +import 
java.io.IOException; +import java.nio.ByteBuffer; + +import com.bigdata.ha.msg.IHAWriteMessage; +import com.bigdata.journal.IRootBlockView; + +public interface IHALogReader { + + void close() throws IOException; + + boolean isEmpty(); + + IRootBlockView getClosingRootBlock() throws IOException; + + boolean hasMoreBuffers() throws IOException; + + IHAWriteMessage processNextBuffer(final ByteBuffer clientBuffer) throws IOException; + +} Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/IHALogReader.java ___________________________________________________________________ Added: svn:mime-type + text/plain Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-12 21:01:58 UTC (rev 6675) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-18 12:55:22 UTC (rev 6676) @@ -28,6 +28,7 @@ import java.io.IOException; import java.io.Serializable; import java.net.InetSocketAddress; +import java.nio.ByteBuffer; import java.rmi.Remote; import java.rmi.server.ExportException; import java.util.Properties; @@ -545,17 +546,36 @@ try { while (r.hasMoreBuffers()) { - + + // IHABufferStrategy + final IHABufferStrategy strategy = (IHABufferStrategy) HAJournal.this + .getBufferStrategy(); + + final boolean isWorm = strategy.getBufferMode() == BufferMode.DiskWORM ; + // get message and write cache buffer. - final IHAWriteMessage msg = r.processNextBuffer(buf - .buffer()); + // only pass in buffer if not WORM + final IHAWriteMessage msg = r.processNextBuffer(isWorm ? null : buf.buffer()); + + if (isWorm) { + // read direct from store + final ByteBuffer clientBuffer = buf.buffer(); + final int nbytes = msg.getSize(); + clientBuffer.position(0); + clientBuffer.limit(nbytes); + + final long address = strategy.toAddr(nbytes, msg + .getFirstOffset()); + final ByteBuffer src = strategy.read(address); + + clientBuffer.put(src); + } if (haLog.isDebugEnabled()) haLog.debug("req=" + req + ", msg=" + msg); // drop them into the write pipeline. - final Future<Void> ft = ((IHABufferStrategy) HAJournal.this - .getBufferStrategy()).sendHALogBuffer(req, msg, + final Future<Void> ft = strategy.sendHALogBuffer(req, msg, buf); // wait for message to make it through the pipeline. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
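The FileState / OpenHALogReader split introduced above is what allows a reader to tail the live HALog while the HALogWriter is still appending to it. Below is a minimal, self-contained sketch of just that handshake, under stated assumptions: a shared state object guarded by its own monitor, with the writer notifying after each appended record and again at commit, and the reader blocking until the next record (or the closing commit) is visible. The class and method names are illustrative only, not the bigdata API.

// Illustrative sketch (not the bigdata classes): the live-log handshake.
// The writer appends records and eventually commits; a reader blocks until
// the next record is visible or the log is known to be complete.
public class LiveLogState {

    private int records = 0;            // records appended so far
    private boolean committed = false;  // has the closing root block been written?

    /** Writer side: called after each write cache block is appended. */
    public synchronized void addRecord() {
        records++;
        notifyAll();
    }

    /** Writer side: called once the closing root block is durable. */
    public synchronized void committed() {
        committed = true;
        notifyAll();
    }

    /**
     * Reader side: block until record #nextRecord exists or the log is
     * committed. Returns true iff another record can be read.
     */
    public synchronized boolean awaitRecord(final int nextRecord)
            throws InterruptedException {
        while (records < nextRecord && !committed) {
            wait(); // woken by addRecord() or committed()
        }
        return records >= nextRecord;
    }
}

In the actual OpenHALogReader above, the reader additionally saves and restores the shared FileChannel position inside a block synchronized on the FileState, so that its reads do not disturb the writer's file pointer.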
From: <mar...@us...> - 2012-10-18 14:04:53

Revision: 6679 http://bigdata.svn.sourceforge.net/bigdata/?rev=6679&view=rev Author: martyncutcher Date: 2012-10-18 14:04:47 +0000 (Thu, 18 Oct 2012) Log Message: ----------- Move WORM specific HA buffer handling into WORMStrategy, and add synchronization to protect FileState in HALogWriter Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogReader.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogWriter.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogReader.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogReader.java 2012-10-18 13:21:27 UTC (rev 6678) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogReader.java 2012-10-18 14:04:47 UTC (rev 6679) @@ -338,13 +338,12 @@ break; } case WORM: { - if (clientBuffer != null) { - throw new IllegalArgumentException( - "No buffer content is available of the strategy is WORM"); - } /* * Note: The WriteCache block needs to be recovered from the - * WORMStrategy by the caller. + * WORMStrategy by the caller. The clientBuffer, if supplied, + * is ignored and untouched. + * + * It is permissible for the argument to be null. */ // final int nbytes = msg.getSize(); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogWriter.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogWriter.java 2012-10-18 13:21:27 UTC (rev 6678) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HALogWriter.java 2012-10-18 14:04:47 UTC (rev 6679) @@ -200,7 +200,7 @@ * @throws FileNotFoundException * @throws IOException */ - public void createLog(final IRootBlockView rootBlock) + public synchronized void createLog(final IRootBlockView rootBlock) throws FileNotFoundException, IOException { if (rootBlock == null) @@ -483,7 +483,7 @@ /** * Clear internal fields. 
*/ - private void reset() { + private synchronized void reset() { m_state = null; @@ -565,7 +565,7 @@ } - public IHALogReader getReader() { + public synchronized IHALogReader getReader() { if (m_state == null) return null; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2012-10-18 13:21:27 UTC (rev 6678) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2012-10-18 14:04:47 UTC (rev 6679) @@ -43,6 +43,8 @@ import com.bigdata.counters.CounterSet; import com.bigdata.counters.Instrument; import com.bigdata.counters.striped.StripedCounters; +import com.bigdata.ha.HAPipelineGlue; +import com.bigdata.ha.QuorumPipeline; import com.bigdata.ha.QuorumRead; import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHAWriteMessage; @@ -2367,13 +2369,32 @@ @Override public Future<Void> sendHALogBuffer(final IHALogRequest req, - final IHAWriteMessage msg, final IBufferAccess b) - throws IOException, InterruptedException { + final IHAWriteMessage msg, final IBufferAccess b) + throws IOException, InterruptedException { - throw new UnsupportedOperationException(); + // read direct from store + final ByteBuffer clientBuffer = b.buffer(); + final int nbytes = msg.getSize(); + clientBuffer.position(0); + clientBuffer.limit(nbytes); - } + final long address = toAddr(nbytes, msg.getFirstOffset()); + final ByteBuffer src = read(address); + clientBuffer.put(src); + + assert clientBuffer.remaining() > 0 : "Empty buffer: " + clientBuffer; + + @SuppressWarnings("unchecked") + final QuorumPipeline<HAPipelineGlue> quorumMember = (QuorumPipeline<HAPipelineGlue>) quorum + .getMember(); + + final Future<Void> remoteWriteFuture = quorumMember.replicate(req, msg, + clientBuffer); + + return remoteWriteFuture; + } + public void setExtentForLocalStore(final long extent) throws IOException, InterruptedException { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-18 13:21:27 UTC (rev 6678) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-18 14:04:47 UTC (rev 6679) @@ -551,26 +551,10 @@ final IHABufferStrategy strategy = (IHABufferStrategy) HAJournal.this .getBufferStrategy(); - final boolean isWorm = strategy.getBufferMode() == BufferMode.DiskWORM ; - // get message and write cache buffer. - // only pass in buffer if not WORM - final IHAWriteMessage msg = r.processNextBuffer(isWorm ? null : buf.buffer()); + // the buffer will be ignored if it is a WORM strategy + final IHAWriteMessage msg = r.processNextBuffer(buf.buffer()); - if (isWorm) { - // read direct from store - final ByteBuffer clientBuffer = buf.buffer(); - final int nbytes = msg.getSize(); - clientBuffer.position(0); - clientBuffer.limit(nbytes); - - final long address = strategy.toAddr(nbytes, msg - .getFirstOffset()); - final ByteBuffer src = strategy.read(address); - - clientBuffer.put(src); - } - if (haLog.isDebugEnabled()) haLog.debug("req=" + req + ", msg=" + msg); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
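After this change the HALog replay loop in HAJournal is buffer-mode agnostic: it always hands its buffer to processNextBuffer() and lets the strategy decide where the payload comes from. The sketch below illustrates the WORM-side half of that split of responsibility; ReplicationPipeline and WormLikeStrategy are placeholder names, not the real IHABufferStrategy or QuorumPipeline interfaces.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;

// Placeholder type standing in for the quorum write pipeline.
interface ReplicationPipeline {
    void replicate(ByteBuffer payload) throws IOException;
}

// Sketch of the WORM-side behaviour after this commit: the strategy, not the
// caller, sources the payload from the backing store at the logged offset and
// only then pushes it down the write pipeline.
final class WormLikeStrategy {

    private final RandomAccessFile store;
    private final ReplicationPipeline pipeline;

    WormLikeStrategy(final RandomAccessFile store,
            final ReplicationPipeline pipeline) {
        this.store = store;
        this.pipeline = pipeline;
    }

    void sendLoggedBlock(final long fileOffset, final int nbytes,
            final ByteBuffer buf) throws IOException {
        buf.clear();
        buf.limit(nbytes);
        // Single read for brevity; production code must retry short reads
        // (the real code goes through FileChannelUtility.readAll).
        store.getChannel().read(buf, fileOffset);
        buf.flip(); // position=0, limit=nbytes: ready for transmission
        pipeline.replicate(buf);
    }
}

A production version would also verify the logged checksum against the bytes read from the store before replicating them.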
From: <tho...@us...> - 2012-10-18 17:25:57
Revision: 6682 http://bigdata.svn.sourceforge.net/bigdata/?rev=6682&view=rev Author: thompsonbry Date: 2012-10-18 17:25:51 +0000 (Thu, 18 Oct 2012) Log Message: ----------- Fixed imports in 3 classes for the halog package refactor. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-18 16:49:31 UTC (rev 6681) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-18 17:25:51 UTC (rev 6682) @@ -28,7 +28,6 @@ import java.io.IOException; import java.io.Serializable; import java.net.InetSocketAddress; -import java.nio.ByteBuffer; import java.rmi.Remote; import java.rmi.server.ExportException; import java.util.Properties; @@ -48,9 +47,9 @@ import com.bigdata.concurrent.FutureTaskMon; import com.bigdata.ha.HAGlue; -import com.bigdata.ha.HALogReader; -import com.bigdata.ha.HALogWriter; import com.bigdata.ha.QuorumService; +import com.bigdata.ha.halog.HALogReader; +import com.bigdata.ha.halog.HALogWriter; import com.bigdata.ha.msg.HALogRootBlocksResponse; import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHALogRootBlocksRequest; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-18 16:49:31 UTC (rev 6681) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-18 17:25:51 UTC (rev 6682) @@ -43,9 +43,9 @@ import com.bigdata.concurrent.FutureTaskMon; import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAGlueDelegate; -import com.bigdata.ha.HALogWriter; import com.bigdata.ha.QuorumService; import com.bigdata.ha.QuorumServiceBase; +import com.bigdata.ha.halog.HALogWriter; import com.bigdata.ha.msg.HALogRequest; import com.bigdata.ha.msg.HALogRootBlocksRequest; import com.bigdata.ha.msg.IHALogRequest; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2012-10-18 16:49:31 UTC (rev 6681) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2012-10-18 17:25:51 UTC (rev 6682) @@ -56,8 +56,8 @@ import com.bigdata.bop.fed.QueryEngineFactory; import com.bigdata.counters.CounterSet; import com.bigdata.ha.HAGlue; -import com.bigdata.ha.HALogWriter; import com.bigdata.ha.QuorumService; +import com.bigdata.ha.halog.HALogWriter; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.DumpJournal; import com.bigdata.journal.IIndexManager; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2012-10-19 20:55:51
Revision: 6685 http://bigdata.svn.sourceforge.net/bigdata/?rev=6685&view=rev Author: thompsonbry Date: 2012-10-19 20:55:44 +0000 (Fri, 19 Oct 2012) Log Message: ----------- Modified HAJournal to use an atomic decision concerning whether the live log or a historical log would be sent down the write pipeline. This decision is now made inside of the HALogWriter. Modified HAJournalServer to start the live log synchronization in addition to historical log synchronization. Modified CreateKBTask to only log when it actually has to wait for a quorum, but not when the quorum is already met. Modified the HAReceiveService to log out a stack trace if there is an error. I have seen some problems which might be related to (A) sending a message before (B) is ready (or to some other cause). Anyway, this causes the replication of the HALogs to fail when it occurs (which is not all the time). We will now see the stack trace for these errors. Modified AbstractJournal to also log the commitCounter on commit (in addition to the commitTime). https://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CreateKBTask.java branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2012-10-18 18:25:23 UTC (rev 6684) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2012-10-19 20:55:44 UTC (rev 6685) @@ -30,11 +30,11 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Formatter; -import java.util.concurrent.Semaphore; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.log4j.Logger; +import org.eclipse.jetty.util.log.Log; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.FileChannelUtility; @@ -104,7 +104,7 @@ static final int VERSION1 = 0x1; /** HA log directory. */ - private final File m_dir; + private final File m_haLogDir; /** * The root block of the leader at the start of the current write set. @@ -165,7 +165,7 @@ final Lock lock = m_stateLock.readLock(); lock.lock(); try { - return m_state == null ? null : m_state.m_log; + return m_state == null ? null : m_state.m_haLogFile; } finally { lock.unlock(); } @@ -220,7 +220,7 @@ public HALogWriter(final File logDir) { - m_dir = logDir; + m_haLogDir = logDir; } @@ -264,7 +264,7 @@ final String logFile = getHALogFileName(commitCounter + 1); - final File log = new File(m_dir, logFile); + final File log = new File(m_haLogDir, logFile); // Must delete file if it exists. 
if (log.exists() && !log.delete()) { @@ -588,7 +588,7 @@ m_state.m_channel.close(); - if (m_state.m_log.exists() && !m_state.m_log.delete()) { + if (m_state.m_haLogFile.exists() && !m_state.m_haLogFile.delete()) { /* * It is a problem if a file exists and we can not delete @@ -597,7 +597,7 @@ * point. */ - throw new IOException("Could not delete: " + m_state.m_log); + throw new IOException("Could not delete: " + m_state.m_haLogFile); } @@ -628,6 +628,15 @@ } + /** + * FIXME This method is only used by the unit tests. They need to modified + * to use {@link #getReader(long)} instead. + * + * @deprecated Use {@link #getReader(long)}. That code can make an atomic + * decision about whether the current HALog is being request or + * a historical HALog. It is not possible for the caller to make + * this decision from the outside. + */ public IHALogReader getReader() { final Lock lock = m_stateLock.readLock(); @@ -642,13 +651,77 @@ } } + /** + * Return the {@link IHALogReader} for the specified commit counter. If the + * request identifies the HALog that is currently being written, then an + * {@link IHALogReader} will be returned that will "see" newly written + * entries on the HALog. If the request identifies a historical HALog that + * has been closed and which exists, then a reader will be returned for that + * HALog file. Otherwise, an exception is thrown. + * + * @param commitCounter + * The commit counter associated with the commit point at the + * close of the write set (the commit counter that is in the file + * name). + * + * @return The {@link IHALogReader}. + * + * @throws IOException + * if the commitCounter identifies an HALog file that does not + * exist or can not be read. + */ + public IHALogReader getReader(final long commitCounter) + throws IOException { + + final File logFile = new File(m_haLogDir, + HALogWriter.getHALogFileName(commitCounter)); + + final Lock lock = m_stateLock.readLock(); + lock.lock(); + try { + + if (!logFile.exists()) { + + // No log for that commit point. + throw new FileNotFoundException(logFile.getName()); + + } + + if (m_state != null + && m_rootBlock.getCommitCounter() + 1 == commitCounter) { + + /* + * This is the live HALog file. + */ + + if (haLog.isDebugEnabled()) + haLog.debug("Opening live HALog: file=" + + m_state.m_haLogFile); + + return new OpenHALogReader(m_state); + + } + + if (haLog.isDebugEnabled()) + haLog.debug("Opening historical HALog: file=" + logFile); + + return new HALogReader(logFile); + + } finally { + + lock.unlock(); + + } + + } + /** * The FileState class encapsulates the file objects shared by the Writer * and Readers. 
*/ static class FileState { final StoreTypeEnum m_storeType; - final File m_log; + final File m_haLogFile; final FileChannel m_channel; final RandomAccessFile m_raf; int m_records = 0; @@ -671,9 +744,9 @@ FileState(final File file, StoreTypeEnum storeType) throws FileNotFoundException { - m_log = file; + m_haLogFile = file; m_storeType = storeType; - m_raf = new RandomAccessFile(m_log, "rw"); + m_raf = new RandomAccessFile(m_haLogFile, "rw"); m_channel = m_raf.getChannel(); m_accessors = 1; // the writer is a reader also } @@ -750,7 +823,7 @@ @Override public IRootBlockView getClosingRootBlock() throws IOException { final RootBlockUtility tmp = new RootBlockUtility(m_state.reopener, - m_state.m_log, true/* validateChecksum */, + m_state.m_haLogFile, true/* validateChecksum */, false/* alternateRootBlock */, false/* ignoreBadRootBlock */); return tmp.chooseRootBlock(); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java 2012-10-18 18:25:23 UTC (rev 6684) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java 2012-10-19 20:55:44 UTC (rev 6685) @@ -402,7 +402,7 @@ try { readFuture.get(); } catch (Exception e) { - log.warn(e); + log.warn(e,e); } lock.lockInterruptibly(); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-18 18:25:23 UTC (rev 6684) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-19 20:55:44 UTC (rev 6685) @@ -4922,7 +4922,9 @@ _commitRecord = _getCommitRecord(); if (txLog.isInfoEnabled()) - txLog.info("COMMIT: commitTime=" + commitTime); + txLog.info("COMMIT: commitCounter=" + + rootBlock.getCommitCounter() + ", commitTime=" + + commitTime); } finally { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-18 18:25:23 UTC (rev 6684) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-19 20:55:44 UTC (rev 6685) @@ -50,6 +50,7 @@ import com.bigdata.ha.QuorumService; import com.bigdata.ha.halog.HALogReader; import com.bigdata.ha.halog.HALogWriter; +import com.bigdata.ha.halog.IHALogReader; import com.bigdata.ha.msg.HALogRootBlocksResponse; import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHALogRootBlocksRequest; @@ -505,18 +506,13 @@ // The commit counter of the desired closing root block. final long commitCounter = req.getCommitCounter(); - final File logFile = new File(haLogDir, - HALogWriter.getHALogFileName(commitCounter)); + /* + * Note: The choice of the "live" versus a historical "closed" log + * file needs to be an atomic decision and thus MUST be made by the + * HALogManager. + */ + final IHALogReader r = getHALogWriter().getReader(commitCounter); - if (!logFile.exists()) { - - // No log for that commit point. 
- throw new FileNotFoundException(logFile.getName()); - - } - - final HALogReader r = new HALogReader(logFile); - final FutureTask<Void> ft = new FutureTaskMon<Void>( new SendHALogTask(req, r)); @@ -526,12 +522,17 @@ } + /** + * Class sends the {@link IHAWriteMessage}s and {@link WriteCache} + * buffer contents along the write pipeline for the requested HALog + * file. + */ private class SendHALogTask implements Callable<Void> { private final IHALogRequest req; - private final HALogReader r; + private final IHALogReader r; - public SendHALogTask(final IHALogRequest req, final HALogReader r) { + public SendHALogTask(final IHALogRequest req, final IHALogReader r) { this.req = req; this.r = r; @@ -542,6 +543,7 @@ final IBufferAccess buf = DirectBufferPool.INSTANCE.acquire(); + long nsent = 0; try { while (r.hasMoreBuffers()) { @@ -564,8 +566,13 @@ // wait for message to make it through the pipeline. ft.get(); + nsent++; + } + if (haLog.isDebugEnabled()) + haLog.debug("req=" + req + ", nsent=" + nsent); + return null; } finally { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-18 18:25:23 UTC (rev 6684) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-19 20:55:44 UTC (rev 6685) @@ -994,6 +994,10 @@ log.error(t, t); + } finally { + + haLog.warn("RESYNCH: exit."); + } return null; @@ -1063,7 +1067,8 @@ InterruptedException, ExecutionException { if (haLog.isInfoEnabled()) - haLog.info("RESYNC: commitCounter=" + commitCounter); + haLog.info("RESYNC: now replicating commitCounter=" + + commitCounter); final IHALogRootBlocksResponse resp; try { @@ -1116,8 +1121,8 @@ // root block when the quorum started that write set. final IRootBlockView openRootBlock = resp.getOpenRootBlock(); - // root block when the quorum committed that write set. - final IRootBlockView closeRootBlock = resp.getCloseRootBlock(); +// // root block when the quorum committed that write set. +// final IRootBlockView closeRootBlock = resp.getCloseRootBlock(); if (openRootBlock.getCommitCounter() != commitCounter - 1) { @@ -1133,22 +1138,22 @@ + openRootBlock); } - if (openRootBlock.getCommitCounter() == closeRootBlock - .getCommitCounter()) { - - /* - * FIXME RESYNC : This is not an error condition. The quorum - * is still writing on the HA Log file for the current write - * set. However, we do not yet have code that will let us - * read on a log file that is currently being written. - */ - - throw new AssertionError( - "Write set is not closed: requested commitCounter=" - + commitCounter); +// if (openRootBlock.getCommitCounter() == closeRootBlock +// .getCommitCounter()) { +// +// /* +// * FIXME RESYNC : This is not an error condition. The quorum +// * is still writing on the HA Log file for the current write +// * set. However, we do not yet have code that will let us +// * read on a log file that is currently being written. +// */ +// +// throw new AssertionError( +// "Write set is not closed: requested commitCounter=" +// + commitCounter); +// +// } - } - /* * If the local journal is empty, then we need to replace both * of it's root blocks with the opening root block. @@ -1206,6 +1211,52 @@ } + /* + * Figure out the closing root block. 
If this HALog file was + * active when we started reading from it, then the open and + * close root blocks would have been identical in the [resp] and + * we will need to grab the root blocks again now that it has + * been closed. + */ + final IRootBlockView closeRootBlock; + { + + // root block when the quorum committed that write set. + IRootBlockView tmp = resp.getCloseRootBlock(); + + if (openRootBlock.getCommitCounter() == tmp + .getCommitCounter()) { + + /* + * The open and close commit counters were the same when + * we first requested them, so we need to re-request the + * close commit counter now that we are done reading on + * the file. + */ + + // Re-request the root blocks for the write set. + final IHALogRootBlocksResponse resp2 = leader + .getHALogRootBlocksForWriteSet(new HALogRootBlocksRequest( + commitCounter)); + + tmp = resp2.getCloseRootBlock(); + + } + + closeRootBlock = tmp; + + if (closeRootBlock.getCommitCounter() != commitCounter) { + + throw new AssertionError( + "Wrong commitCounter for closing root block: expected commitCounter=" + + commitCounter + + ", but closeRootBlock=" + + closeRootBlock); + + } + + } + // Local commit. journal.doLocalCommit( (QuorumService<HAGlue>) HAQuorumService.this, @@ -1220,6 +1271,10 @@ logLock.unlock(); } + if (haLog.isInfoEnabled()) + haLog.info("RESYNC: caught up to commitCounter=" + + commitCounter); + } } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CreateKBTask.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CreateKBTask.java 2012-10-18 18:25:23 UTC (rev 6684) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CreateKBTask.java 2012-10-19 20:55:44 UTC (rev 6685) @@ -82,8 +82,14 @@ final long token; try { - log.warn("Awaiting quorum."); - token = quorum.awaitQuorum(); + long tmp = quorum.token(); + if (tmp == Quorum.NO_QUORUM) { + // Only log if we are going to wait. + log.warn("Awaiting quorum."); + tmp = quorum.awaitQuorum(); + } + token = tmp; + assert token != Quorum.NO_QUORUM; } catch (AsynchronousQuorumCloseException e1) { throw new RuntimeException(e1); } catch (InterruptedException e1) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html 2012-10-18 18:25:23 UTC (rev 6684) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html 2012-10-19 20:55:44 UTC (rev 6685) @@ -52,6 +52,7 @@ > Analytic <INPUT type="checkbox" name="xhtml" value="true" title="Request XHTML response (results formatted as table)." + checked="checked" > XHTML </P> </FORM> @@ -76,6 +77,7 @@ > Analytic <INPUT type="checkbox" name="monitor" value="true" title="Monitor the execution of the UPDATE request." + checked="checked" > Monitor </P> </FORM> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
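The subtle step in the resynchronization change above is the closing root block: if the follower started reading a live HALog, the open and close root blocks in the initial response were identical, so the close root block must be requested again once the log has been drained. A hedged sketch of that control flow follows; RootBlocks and LeaderProxy are stand-ins for the real IHALogRootBlocksResponse and HAGlue RMI types.

// Placeholder types standing in for the root-block response and the RMI
// proxy to the quorum leader; they are not the bigdata interfaces.
interface RootBlocks {
    long openCommitCounter();
    long closeCommitCounter();
}

interface LeaderProxy {
    RootBlocks getRootBlocksForWriteSet(long commitCounter) throws Exception;
}

final class ResyncHelper {

    /**
     * If the open and close root blocks were identical when replay of the
     * HALog began (i.e. the log was still live), re-request them once the
     * log has been fully read so the closing root block reflects the
     * now-committed write set, then sanity-check the commit counter.
     */
    static RootBlocks resolveClosingRootBlock(final long commitCounter,
            RootBlocks blocks, final LeaderProxy leader) throws Exception {

        if (blocks.openCommitCounter() == blocks.closeCommitCounter()) {
            // Write set was still open when we started reading; ask again.
            blocks = leader.getRootBlocksForWriteSet(commitCounter);
        }

        if (blocks.closeCommitCounter() != commitCounter) {
            throw new IllegalStateException("Expected commitCounter="
                    + commitCounter + ", but close="
                    + blocks.closeCommitCounter());
        }

        return blocks;
    }
}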
From: <tho...@us...> - 2012-10-23 22:24:30
Revision: 6686 http://bigdata.svn.sourceforge.net/bigdata/?rev=6686&view=rev Author: thompsonbry Date: 2012-10-23 22:24:23 +0000 (Tue, 23 Oct 2012) Log Message: ----------- - private loggers in the QuorumXXXImpl classes. - fixed HALog replay for WORM, adding private raw read with zero copy IO. - changed logger to INFO in AbstractJournal. - javadoc and comment only changes to HAJournalServer. - javadoc on IHABufferStrategy. - Removed FIXME in RWStore. @see https://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumReadImpl.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java 2012-10-19 20:55:44 UTC (rev 6685) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java 2012-10-23 22:24:23 UTC (rev 6686) @@ -56,7 +56,7 @@ QuorumStateChangeListenerBase implements QuorumCommit<S>, QuorumStateChangeListener { - static protected transient final Logger log = Logger + static private transient final Logger log = Logger .getLogger(QuorumCommitImpl.class); protected final QuorumMember<S> member; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2012-10-19 20:55:44 UTC (rev 6685) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2012-10-23 22:24:23 UTC (rev 6686) @@ -153,7 +153,7 @@ QuorumStateChangeListenerBase implements QuorumPipeline<S>, QuorumStateChangeListener { - static protected transient final Logger log = Logger + static private transient final Logger log = Logger .getLogger(QuorumPipelineImpl.class); /** @@ -171,7 +171,7 @@ */ private final ReentrantLock lock = new ReentrantLock(); - /** send service for the leader. */ + /** send service (iff this is the leader). */ private HASendService sendService; /** @@ -404,14 +404,16 @@ */ public void pipelineChange(final UUID oldDownStreamId, final UUID newDownStreamId) { - if (log.isInfoEnabled()) - log.info(""); super.pipelineChange(oldDownStreamId, newDownStreamId); lock.lock(); try { // The address of the next service in the pipeline. final InetSocketAddress addrNext = newDownStreamId == null ? null : getAddrNext(newDownStreamId); + if (log.isInfoEnabled()) + log.info("oldDownStreamId=" + oldDownStreamId + + ",newDownStreamId=" + newDownStreamId + ", addrNext=" + + addrNext); if (sendService != null) { // Terminate the existing connection. 
sendService.terminate(); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumReadImpl.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumReadImpl.java 2012-10-19 20:55:44 UTC (rev 6685) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumReadImpl.java 2012-10-23 22:24:23 UTC (rev 6686) @@ -48,7 +48,7 @@ public class QuorumReadImpl<S extends HAReadGlue> extends QuorumStateChangeListenerBase implements QuorumRead<S> { - static protected transient final Logger log = Logger + static private transient final Logger log = Logger .getLogger(QuorumReadImpl.class); protected final QuorumMember<S> member; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-19 20:55:44 UTC (rev 6685) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-23 22:24:23 UTC (rev 6686) @@ -4898,7 +4898,7 @@ */ if (haLog.isInfoEnabled()) - haLog.error("Reset from root block: serviceUUID=" + haLog.info("Reset from root block: serviceUUID=" + localService.getServiceId()); ((IHABufferStrategy) _bufferStrategy) Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2012-10-19 20:55:44 UTC (rev 6685) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2012-10-23 22:24:23 UTC (rev 6686) @@ -62,7 +62,8 @@ * @param msg * The {@link IHAWriteMessage}. * @param b - * The raw buffer. + * The raw buffer. Bytes from position to limit will be sent. + * remaining() must equal {@link IHAWriteMessage#getSize()}. * * @return The {@link Future} for that request. * Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2012-10-19 20:55:44 UTC (rev 6685) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2012-10-23 22:24:23 UTC (rev 6686) @@ -1389,45 +1389,17 @@ * Note: Strip off the checksum from the end of the record and validate * it. */ - final Lock readLock = extensionLock.readLock(); - readLock.lock(); - try { + { + final long beginDisk = System.nanoTime(); // Allocate a new buffer of the exact capacity. final ByteBuffer dst = ByteBuffer.allocate(nbytes); // Read through to the disk. - final long beginDisk = System.nanoTime(); + readRaw(nbytes, offset, dst); - try { + if (useChecksums) { - // the offset into the disk file. - final long pos = headerSize + offset; - - // read on the disk. - final int ndiskRead = FileChannelUtility.readAll(opener, dst, - pos); - - // update performance counters. - final StoreCounters<?> c = (StoreCounters<?>) storeCounters - .get().acquire(); - try { - c.ndiskRead += ndiskRead; - } finally { - c.release(); - } - - } catch (IOException ex) { - - throw new RuntimeException(ex); - - } - - // flip for reading. - dst.flip(); - - if(useChecksums) { - // extract the checksum. 
final int chk = dst.getInt(nbytes - 4); @@ -1461,15 +1433,66 @@ // return the buffer. return dst; + + } + } + + /** + * Read on the backing file. + * + * @param nbytes + * The #of bytes to read. + * @param offset + * The offset of the first byte (relative to the start of the + * data region). + * @param dst + * Where to put the data. Bytes will be written at position until + * limit. + * @return The caller's buffer, prepared for reading. + */ + private ByteBuffer readRaw(final int nbytes, final long offset, + final ByteBuffer dst) { + + final Lock readLock = extensionLock.readLock(); + readLock.lock(); + try { + + try { + + // the offset into the disk file. + final long pos = headerSize + offset; + + // read on the disk. + final int ndiskRead = FileChannelUtility.readAll(opener, dst, + pos); + + // update performance counters. + final StoreCounters<?> c = (StoreCounters<?>) storeCounters + .get().acquire(); + try { + c.ndiskRead += ndiskRead; + } finally { + c.release(); + } + + } catch (IOException ex) { + + throw new RuntimeException(ex); + + } + + // flip for reading. + dst.flip(); + + return dst; } finally { readLock.unlock(); - } } - + /** * Used to re-open the {@link FileChannel} in this class. */ @@ -2378,11 +2401,8 @@ clientBuffer.position(0); clientBuffer.limit(nbytes); - final long address = toAddr(nbytes, msg.getFirstOffset()); - final ByteBuffer src = read(address); - - clientBuffer.put(src); - + readRaw(nbytes, msg.getFirstOffset(), clientBuffer); + assert clientBuffer.remaining() > 0 : "Empty buffer: " + clientBuffer; @SuppressWarnings("unchecked") Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-10-19 20:55:44 UTC (rev 6685) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-10-23 22:24:23 UTC (rev 6686) @@ -5039,20 +5039,6 @@ true/* useChecksums */, true/* bufferHasData */, m_reopener, msg.getFileExtent()); - /* - * FIXME We need to update the allocators either here based on that - * RecordMap. Expose it via a read-only interface and then mock up the - * bits in the appropriate allocators. - * - * Determining FixedAllocators from sequentially allocated addesses is - * not straightforward. However, if we can assume that allocations are - * made sequentially, and we know the slot size of the allocation, then it - * may be possible to infer the FixedAllocation requirements and therefore - * any rw-native address. - * - * - */ - /* * Setup buffer for writing. We receive the buffer with pos=0, \xCA * limit=#ofbyteswritten. 
However, flush() expects pos=limit, will Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2012-10-19 20:55:44 UTC (rev 6685) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2012-10-23 22:24:23 UTC (rev 6686) @@ -100,7 +100,7 @@ */ public class TestWORMWriteCacheService extends TestCase3 { - protected static final Logger log = Logger.getLogger + private static final Logger log = Logger.getLogger ( TestWORMWriteCacheService.class ); @@ -294,9 +294,10 @@ nreceived.incrementAndGet(); - if (log.isTraceEnabled()) - log.trace("nreceived=" + nreceived + ", message=" + msg - + ", data=" + data); + if (TestWORMWriteCacheService.log.isTraceEnabled()) + TestWORMWriteCacheService.log.trace("nreceived=" + + nreceived + ", message=" + msg + ", data=" + + data); final ChecksumUtility chk = ChecksumUtility.threadChk.get(); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-19 20:55:44 UTC (rev 6685) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-23 22:24:23 UTC (rev 6686) @@ -1121,9 +1121,6 @@ // root block when the quorum started that write set. final IRootBlockView openRootBlock = resp.getOpenRootBlock(); -// // root block when the quorum committed that write set. -// final IRootBlockView closeRootBlock = resp.getCloseRootBlock(); - if (openRootBlock.getCommitCounter() != commitCounter - 1) { /* @@ -1138,22 +1135,6 @@ + openRootBlock); } -// if (openRootBlock.getCommitCounter() == closeRootBlock -// .getCommitCounter()) { -// -// /* -// * FIXME RESYNC : This is not an error condition. The quorum -// * is still writing on the HA Log file for the current write -// * set. However, we do not yet have code that will let us -// * read on a log file that is currently being written. -// */ -// -// throw new AssertionError( -// "Write set is not closed: requested commitCounter=" -// + commitCounter); -// -// } - /* * If the local journal is empty, then we need to replace both * of it's root blocks with the opening root block. @@ -1323,7 +1304,15 @@ log.info("Ignoring message: " + msg); /* - * Drop the pipeline message. We can't log it yet. + * Drop the pipeline message. + * + * Note: There are two cases here. + * + * (A) It is a historical message that is being ignored on + * this node; + * + * (B) It is a live message, but this node is not caught up + * and therefore can not log the message yet. */ } @@ -1339,12 +1328,12 @@ /** * Adjust the size on the disk of the local store to that given in the * message. - * - * Note: DO NOT do this for historical messages! - * - * @throws IOException - * - * @todo Trap truncation vs extend? + * <p> + * Note: When historical messages are being replayed, the caller needs + * to decide whether the message should applied to the local store. If + * so, then the extent needs to be updated. If not, then the message + * should be ignored (it will already have been replicated to the next + * follower). 
*/ private void setExtent(final IHAWriteMessage msg) throws IOException { |
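The readRaw() method added to WORMStrategy in the diff above replaces an address-based read in the HA replication path with a positioned read against the backing file, taken while holding the read lock of the extension lock so the file cannot be extended or truncated mid-read. The following is a minimal, self-contained sketch of that pattern only; the class name RawFileReader, the RandomAccessFile constructor, and the direct FileChannel.read() loop are illustrative assumptions, and the sketch omits the FileChannelUtility.readAll() helper and the StoreCounters performance-counter update used by the real code.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Sketch of a raw, offset-based read against a WORM-style backing file.
 * The read is positioned at (headerSize + offset) and performed under the
 * read lock of the lock guarding file extension.
 */
public class RawFileReader {

    private final FileChannel channel;
    private final long headerSize;
    private final ReentrantReadWriteLock extensionLock =
            new ReentrantReadWriteLock();

    public RawFileReader(final RandomAccessFile file, final long headerSize) {
        this.channel = file.getChannel();
        this.headerSize = headerSize;
    }

    /**
     * Read nbytes at the given offset (relative to the start of the data
     * region) into the caller's buffer and flip it for reading. Assumes the
     * caller passes a buffer with position=0 and limit=nbytes.
     */
    public ByteBuffer readRaw(final int nbytes, final long offset,
            final ByteBuffer dst) {
        if (dst.remaining() != nbytes)
            throw new IllegalArgumentException("Buffer not prepared: nbytes=" + nbytes);
        final ReentrantReadWriteLock.ReadLock readLock = extensionLock.readLock();
        readLock.lock();
        try {
            // Absolute position in the backing file.
            long pos = headerSize + offset;
            // Loop until all requested bytes have been read.
            while (dst.hasRemaining()) {
                final int n = channel.read(dst, pos);
                if (n == -1)
                    throw new RuntimeException("EOF reading at pos=" + pos);
                pos += n;
            }
            // Flip so the caller can read the data back out.
            dst.flip();
            return dst;
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        } finally {
            readLock.unlock();
        }
    }
}

In the HA code path above, this is what lets a historical write cache block be re-read from the journal by file offset and length alone, without translating back through a store address.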
From: <tho...@us...> - 2012-10-25 18:01:47
|
Revision: 6688 http://bigdata.svn.sourceforge.net/bigdata/?rev=6688&view=rev
Author: thompsonbry
Date: 2012-10-25 18:01:34 +0000 (Thu, 25 Oct 2012)

Log Message:
-----------
Working through the resynchronization protocol with Martyn for the HA Journal. At this point, a third service can be brought up and it will synchronize with the quorum, vote the lastCommitTime of the leader, and then enter the met quorum.

- Refactored WORMStrategy to allow a raw read against the file to support replicating historical write cache messages.

- Added code to sync and join a met quorum when nobody is writing on the leader.

- Modified BigdataServlet to recognize when a quorum is met but the service is not part of the met quorum.

- Refactored AbstractJournal to lift out the code that runs for the 2-phase commit protocol into Runnable/Callable classes. This provides better visibility in stack traces.

- Modified the 2-phase commit protocol. The AbstractJournal now records whether or not it voted to commit when it received the prepare message, and it examines this boolean flag when it receives the commit message. If it did not vote to prepare, then it will not do the commit. This ensures that non-joined services ignore commit messages.

- Refactored the HAJournal.config files used for testing to no longer use UUIDs in their service directory path names. The HAJournal.config file was also renamed to HAJournal-A.config for better symmetry among these files.

Next steps:

- A bug has been observed where starting A, then B, and finally C can fail to correctly tear down the write pipeline in pipelineChange() on B. It appears that the logic gets stuck in HASendService.terminate(). This needs more investigation.

- I have not yet tested a catch up and join under a sustained write workload. This should be OK, but it does need to be tested.

- Service rebuild (disaster recovery).

- Quorum pipeline member proxy patterns to support live backup and offset replication.

- Disaster recovery from live backups.

- CI for HA.
@see https://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config Removed Paths: ------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.config Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java 2012-10-24 13:17:54 UTC (rev 6687) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java 2012-10-25 18:01:34 UTC (rev 6688) @@ -307,6 +307,13 @@ } done = true; } catch (ExecutionException ex) { + /* + * TODO prepare2Phase() is throwing exceptions if + * preconditions are violated. Unless if is a joined + * service, it probably should just vote "no" instead. We do + * not need to log @ ERROR when a precondition for a + * non-joined service has been violated. + */ log.error(ex, ex); } finally { if (!done) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java 2012-10-24 13:17:54 UTC (rev 6687) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java 2012-10-25 18:01:34 UTC (rev 6688) @@ -210,6 +210,8 @@ tmp.shutdownNow(); // clear address. addr.set(null); + if (log.isInfoEnabled()) + log.info(toString()); } } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-24 13:17:54 UTC (rev 6687) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-25 18:01:34 UTC (rev 6688) @@ -48,6 +48,7 @@ import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -4988,6 +4989,11 @@ * The most recent prepare request. */ private final AtomicReference<IHA2PhasePrepareMessage> prepareRequest = new AtomicReference<IHA2PhasePrepareMessage>(); + + /** + * Whether or not we voted "yes" for the last prepare request. 
+ */ + private final AtomicBoolean vote = new AtomicBoolean(false); @Override public UUID getServiceId() { @@ -5046,59 +5052,26 @@ + isRootBlock0 + ", rootBlock=" + rootBlock + ", timeout=" + timeout + ", unit=" + unit); - if (!rootBlock.getUUID().equals( - AbstractJournal.this._rootBlock.getUUID())) { - - /* - * The root block has a different UUID. We can not accept this - * condition. - */ - - throw new IllegalStateException(); - - } - - if (rootBlock.getLastCommitTime() <= AbstractJournal.this._rootBlock - .getLastCommitTime()) { - - /* - * The root block has a commit time that is LTE the most recent - * commit on this Journal. We can not accept this condition. - */ - - throw new IllegalStateException(); - - } - - if (rootBlock.getCommitCounter() <= AbstractJournal.this._rootBlock - .getCommitCounter()) { - - /* - * The root block has a commit counter that is LTE the most - * recent commit counter on this Journal. We can not accept this - * condition. - */ - - throw new IllegalStateException(); - - } - // the quorum token from the leader is in the root block. final long prepareToken = rootBlock.getQuorumToken(); - + + // Do not prepare if the token is wrong. quorum.assertQuorum(prepareToken); // Save off a reference to the prepare request. prepareRequest.set(prepareMessage); + + // Clear vote (assume NO unless proven otherwise). + vote.set(false); final QuorumService<HAGlue> quorumService = quorum.getClient(); // Note: as decided by the leader! - final boolean isJoined = prepareMessage.isJoinedService(); - - // true the token is valid and this service is the quorum leader - final boolean isLeader = quorumService.isLeader(prepareToken); + final boolean isJoined = prepareMessage.isJoinedService(); + // true the token is valid and this service is the quorum leader + final boolean isLeader = quorumService.isLeader(prepareToken); + final FutureTask<Boolean> ft; if (!isJoined) { @@ -5107,14 +5080,8 @@ * A NOP task if this service is not joined with the met quorum. */ - ft = new FutureTaskMon<Boolean>(new Runnable() { + ft = new FutureTaskMon<Boolean>(new VoteNoTask()); - public void run() { - - }; - - }, true/* yes */); - } else { /* @@ -5122,56 +5089,12 @@ * met quorum. */ - ft = new FutureTaskMon<Boolean>(new Runnable() { + ft = new FutureTaskMon<Boolean>(new Prepare2PhaseTask( + prepareMessage) { + }); - public void run() { + } - final IRootBlockView rootBlock = prepareMessage.getRootBlock(); - - if (haLog.isInfoEnabled()) - haLog.info("preparedRequest=" + rootBlock); - - if (rootBlock == null) - throw new IllegalStateException(); - - quorum.assertQuorum(prepareToken); - - /* - * Call to ensure strategy does everything required for - * itself before final root block commit. At a minimum - * it must flush its write cache to the backing file - * (issue the writes). - */ - // _bufferStrategy.commit(); // lifted to before we - // retrieve - // RootBlock in commitNow - /* - * Force application data to stable storage _before_ we - * update the root blocks. This option guarantees that - * the application data is stable on the disk before the - * atomic commit. Some operating systems and/or file - * systems may otherwise choose an ordered write with - * the consequence that the root blocks are laid down on - * the disk before the application data and a hard - * failure could result in the loss of application data - * addressed by the new root blocks (data loss on - * restart). - * - * Note: We do not force the file metadata to disk. 
If - * that is done, it will be done by a force() after we - * write the root block on the disk. - */ - if (doubleSync) { - - _bufferStrategy.force(false/* metadata */); - - } - - } - }, true/* vote=yes */); - - } - if(isLeader) { /* @@ -5203,124 +5126,146 @@ return getProxy(ft); - } + } - @Override - public Future<Void> commit2Phase( - final IHA2PhaseCommitMessage commitMessage) { + /** + * Task votes NO (unconditional). + * <p> + * Note: If we were not joined at the start of the 2-phase commit, then + * we will not participate. This provides an atomic decision point with + * respect to when a service that is rebuilding or resynchronizing will + * participate in a 2-phase commit. By voting NO here, the + * commit2Phase() operation will be a NOP for THIS service. + * <p> + * Note: The vote of a service that was not joined with the met quorum + * at the time that we begin the 2-phase commit protocol is ignored. + */ + private class VoteNoTask implements Callable<Boolean>{ - final long commitTime = commitMessage.getCommitTime(); - - if (haLog.isInfoEnabled()) - haLog.info("commitTime=" + commitTime); + public Boolean call() throws Exception { - final FutureTask<Void> ft = new FutureTaskMon<Void>(new Runnable() { - - public void run() { + // Vote NO. + vote.set(false); + + return vote.get(); - if (haLog.isInfoEnabled()) - haLog.info("commitTime=" + commitTime); + } - final IHA2PhasePrepareMessage prepareMessage = prepareRequest.get(); + } // class VoteNoTask - if (prepareMessage == null) - throw new IllegalStateException(); + /** + * Task prepares for a 2-phase commit (syncs to the disk) and votes YES + * iff if is able to prepare successfully. + */ + private class Prepare2PhaseTask implements Callable<Boolean> { - final IRootBlockView rootBlock = prepareMessage - .getRootBlock(); + private final IHA2PhasePrepareMessage prepareMessage; - if (rootBlock == null) - throw new IllegalStateException(); + public Prepare2PhaseTask(final IHA2PhasePrepareMessage prepareMessage) { - _fieldReadWriteLock.writeLock().lock(); + if (prepareMessage == null) + throw new IllegalArgumentException(); + + this.prepareMessage = prepareMessage; + + } + + public Boolean call() throws Exception { - try { + final IRootBlockView rootBlock = prepareMessage.getRootBlock(); - if (rootBlock.getLastCommitTime() != commitTime) { - /* - * The commit time does not agree with the root - * block from the prepare message. - */ - throw new IllegalStateException(); - } + if (haLog.isInfoEnabled()) + haLog.info("preparedRequest=" + rootBlock); - // verify that the qourum has not changed. - quorum.assertQuorum(rootBlock.getQuorumToken()); + if (rootBlock == null) + throw new IllegalStateException(); - final QuorumService<HAGlue> localService = quorum - .getClient(); + if (!rootBlock.getUUID().equals( + AbstractJournal.this._rootBlock.getUUID())) { - if (prepareMessage.isJoinedService()) { + /* + * The root block has a different UUID. We can not accept this + * condition. + */ - /* - * Only the services that are joined go through the - * commit protocol. - */ + throw new IllegalStateException(); - AbstractJournal.this.doLocalCommit(localService, - rootBlock); + } - } // if(isJoinedService) - - try { + if (rootBlock.getLastCommitTime() <= AbstractJournal.this._rootBlock + .getLastCommitTime()) { - /* - * Write the root block on the HA log file, closing - * out that file. - */ - - localService.logRootBlock(rootBlock); - - } catch (IOException e) { - /* - * We have already committed. 
- * - * This HA log file will be unusable if we have to - * replay the write set to resynchronize some other - * service. However, it is still possible to obtain - * the HA log file from some other service in the - * qourum. If all else fails, the hypothetical - * service can be rebuilt from scratch. - */ - haLog.error("UNABLE TO SEAL HA LOG FILE WITH ROOT BLOCK: " - + getServiceId() - + ", rootBlock=" - + rootBlock); - } + /* + * The root block has a commit time that is LTE the most recent + * commit on this Journal. We can not accept this condition. + */ - if (quorum.isQuorumFullyMet(rootBlock.getQuorumToken())) { + throw new IllegalStateException(); - /* - * The HA log files are purged on each node any time - * the quorum is fully met and goes through a commit - * point. - */ + } - localService.purgeHALogs(true/* includeCurrent */); + if (rootBlock.getCommitCounter() <= AbstractJournal.this._rootBlock + .getCommitCounter()) { - } + /* + * The root block has a commit counter that is LTE the most + * recent commit counter on this Journal. We can not accept this + * condition. + */ - } catch(Throwable t) { - - haLog.error("ERROR IN 2-PHASE COMMIT: " + t - + ", rootBlock=" + rootBlock, t); + throw new IllegalStateException(); + + } - quorum.getActor().serviceLeave(); + // the quorum token from the leader is in the root block. + final long prepareToken = rootBlock.getQuorumToken(); - throw new RuntimeException(t); - - } finally { + quorum.assertQuorum(prepareToken); - // Discard the prepare request. - prepareRequest.set(null/* discard */); + /* + * Call to ensure strategy does everything required for itself + * before final root block commit. At a minimum it must flush + * its write cache to the backing file (issue the writes). + */ + // _bufferStrategy.commit(); // lifted to before we + // retrieve + // RootBlock in commitNow + /* + * Force application data to stable storage _before_ we update + * the root blocks. This option guarantees that the application + * data is stable on the disk before the atomic commit. Some + * operating systems and/or file systems may otherwise choose an + * ordered write with the consequence that the root blocks are + * laid down on the disk before the application data and a hard + * failure could result in the loss of application data + * addressed by the new root blocks (data loss on restart). + * + * Note: We do not force the file metadata to disk. If that is + * done, it will be done by a force() after we write the root + * block on the disk. + */ + if (doubleSync) { - _fieldReadWriteLock.writeLock().unlock(); + _bufferStrategy.force(false/* metadata */); - } + } - } - }, null/* Void */); + // Vote YES. + vote.set(true); + return vote.get(); + + } + + } + + @Override + public Future<Void> commit2Phase( + final IHA2PhaseCommitMessage commitMessage) { + + final FutureTask<Void> ft = new FutureTaskMon<Void>( + new Commit2PhaseTask(commitMessage), null/* Void */); + /* * Run in the caller's thread. * @@ -5333,31 +5278,152 @@ return getProxy(ft); } + + /** + * 2-Phase commit (service must have voted YES for the 2-phase prepare). 
+ */ + private class Commit2PhaseTask implements Runnable { - @Override - public Future<Void> abort2Phase(final IHA2PhaseAbortMessage abortMessage) { + private final IHA2PhaseCommitMessage commitMessage; - final long token = abortMessage.getQuorumToken(); - - if (haLog.isInfoEnabled()) - haLog.info("token=" + token); + public Commit2PhaseTask(final IHA2PhaseCommitMessage commitMessage) { - final FutureTask<Void> ft = new FutureTaskMon<Void>(new Runnable() { - public void run() { + if (commitMessage == null) + throw new IllegalArgumentException(); + this.commitMessage = commitMessage; + + } + + public void run() { + + _fieldReadWriteLock.writeLock().lock(); + + try { + + final long commitTime = commitMessage.getCommitTime(); + if (haLog.isInfoEnabled()) - haLog.info("token=" + token); + haLog.info("commitTime=" + commitTime + ", vote=" + + vote); + + if (!vote.get()) { + + /* + * This service voted NO. It will not participate in + * the commit. + */ + + return; - quorum.assertQuorum(token); + } + + final IHA2PhasePrepareMessage prepareMessage = prepareRequest.get(); + if (prepareMessage == null) + throw new IllegalStateException(); + + final IRootBlockView rootBlock = prepareMessage + .getRootBlock(); + + if (rootBlock == null) + throw new IllegalStateException(); + + if (rootBlock.getLastCommitTime() != commitTime) { + /* + * The commit time does not agree with the root + * block from the prepare message. + */ + throw new IllegalStateException(); + } + + // verify that the qourum has not changed. + quorum.assertQuorum(rootBlock.getQuorumToken()); + + final QuorumService<HAGlue> localService = quorum + .getClient(); + + if (prepareMessage.isJoinedService()) { + + /* + * Only the services that are joined go through the + * commit protocol. + */ + + AbstractJournal.this.doLocalCommit(localService, + rootBlock); + + } // if(isJoinedService) + + try { + + /* + * Write the root block on the HA log file, closing + * out that file. + */ + + localService.logRootBlock(rootBlock); + + } catch (IOException e) { + /* + * We have already committed. + * + * This HA log file will be unusable if we have to + * replay the write set to resynchronize some other + * service. However, it is still possible to obtain + * the HA log file from some other service in the + * qourum. If all else fails, the hypothetical + * service can be rebuilt from scratch. + */ + haLog.error("UNABLE TO SEAL HA LOG FILE WITH ROOT BLOCK: " + + getServiceId() + + ", rootBlock=" + + rootBlock); + } + + if (quorum.isQuorumFullyMet(rootBlock.getQuorumToken())) { + + /* + * The HA log files are purged on each node any time + * the quorum is fully met and goes through a commit + * point. + */ + + localService.purgeHALogs(true/* includeCurrent */); + + } + + } catch(Throwable t) { + + haLog.error("ERROR IN 2-PHASE COMMIT: " + t + + ", rootBlock=" + prepareRequest.get(), t); + + quorum.getActor().serviceLeave(); + + throw new RuntimeException(t); + + } finally { + // Discard the prepare request. prepareRequest.set(null/* discard */); - _abort(); + // Discard the vote. + vote.set(false); + _fieldReadWriteLock.writeLock().unlock(); + } - }, null/* Void */); + } + + } + + @Override + public Future<Void> abort2Phase(final IHA2PhaseAbortMessage abortMessage) { + + final FutureTask<Void> ft = new FutureTaskMon<Void>( + new Abort2PhaseTask(abortMessage), null/* Void */); + /* * Run in the caller's thread. * @@ -5369,9 +5435,45 @@ return getProxy(ft); - } + } /** + * 2-Phase abort. 
+ */ + private class Abort2PhaseTask implements Runnable { + + private final IHA2PhaseAbortMessage abortMessage; + + public Abort2PhaseTask(final IHA2PhaseAbortMessage abortMessage) { + + if (abortMessage == null) + throw new IllegalArgumentException(); + + this.abortMessage = abortMessage; + } + + public void run() { + + final long token = abortMessage.getQuorumToken(); + + if (haLog.isInfoEnabled()) + haLog.info("token=" + token); + + quorum.assertQuorum(token); + + // Discard the prepare request. + prepareRequest.set(null/* discard */); + + // Discard the vote. + vote.set(false); + + _abort(); + + } + + } + + /** * {@inheritDoc} * * @todo We should test the LRUNexus for failover reads and install @@ -5434,6 +5536,9 @@ } + /* + * Delegated to HAQuorumService. + */ @Override public Future<Void> receiveAndReplicate(final IHALogRequest req, final IHAWriteMessage msg) throws IOException { @@ -5441,35 +5546,11 @@ if (haLog.isDebugEnabled()) haLog.debug("req=" + req + ", msg=" + msg); -// if (req == null) { -// -// /* -// * Adjust the size on the disk of the local store to that given -// * in the message. -// * -// * Note: DO NOT do this for historical messages! -// * -// * @todo Trap truncation vs extend? -// */ -// -// try { -// -// ((IHABufferStrategy) AbstractJournal.this._bufferStrategy) -// .setExtentForLocalStore(msg.getFileExtent()); -// -// } catch (InterruptedException e) { -// -// throw new RuntimeException(e); -// -// } -// -// } + final Future<Void> ft = quorum.getClient().receiveAndReplicate(req, + msg); - final Future<Void> ft = quorum.getClient() - .receiveAndReplicate(req, msg); + return getProxy(ft); - return getProxy(ft); - } /* Copied: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config (from rev 6668, branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.config) =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2012-10-25 18:01:34 UTC (rev 6688) @@ -0,0 +1,295 @@ +import net.jini.jeri.BasicILFactory; +import net.jini.jeri.BasicJeriExporter; +import net.jini.jeri.tcp.TcpServerEndpoint; + +import net.jini.discovery.LookupDiscovery; +import net.jini.core.discovery.LookupLocator; +import net.jini.core.entry.Entry; +import net.jini.lookup.entry.Name; +import net.jini.lookup.entry.Comment; +import net.jini.lookup.entry.Address; +import net.jini.lookup.entry.Location; +import net.jini.lookup.entry.ServiceInfo; +import net.jini.core.lookup.ServiceTemplate; + +import java.io.File; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.UUID; + +import com.bigdata.util.NV; +import com.bigdata.util.config.NicUtil; +import com.bigdata.journal.Options; +import com.bigdata.journal.BufferMode; +import com.bigdata.journal.jini.ha.HAJournal; +import com.bigdata.jini.lookup.entry.*; +import com.bigdata.service.IBigdataClient; +import com.bigdata.service.AbstractTransactionService; +import com.bigdata.service.jini.*; +import com.bigdata.service.jini.lookup.DataServiceFilter; +import com.bigdata.service.jini.master.ServicesTemplate; +import com.bigdata.jini.start.config.*; +import com.bigdata.jini.util.ConfigMath; + +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Id; + +// imports for 
various options. +import com.bigdata.btree.IndexMetadata; +import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.spo.SPORelation; +import com.bigdata.rdf.spo.SPOKeyOrder; +import com.bigdata.rdf.lexicon.LexiconRelation; +import com.bigdata.rdf.lexicon.LexiconKeyOrder; +import com.bigdata.rawstore.Bytes; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeUnit.*; + +/* + * This is a sample configuration file for a highly available Journal. A + * version of this file must be available to each HAJournalServer in the + * pipeline. + */ + +/* + * Globals. + */ +bigdata { + + private static fedname = "benchmark"; + + // NanoSparqlServer (http) port. + private static nssPort = 8090; + + // write replication pipeline port (listener). + private static haPort = 9090; + + // The #of services in the write pipeline. + private static replicationFactor = 3; + + // The logical service identifier shared by all members of the quorum. + private static logicalServiceId = "test-1"; + + // The service directory. + private static serviceDir = new File(new File(fedname,logicalServiceId),"A"); + + // journal data directory. + private static dataDir = serviceDir; + + // HA log directory. + private static logDir = new File(serviceDir,"HALog"); + + // one federation, multicast discovery. + //static private groups = LookupDiscovery.ALL_GROUPS; + + // unicast discovery or multiple setups, MUST specify groups. + static private groups = new String[]{bigdata.fedname}; + + /** + * One or more unicast URIs of the form <code>jini://host/</code> + * or <code>jini://host:port/</code> (no default). + * + * This MAY be an empty array if you want to use multicast + * discovery <strong>and</strong> you have specified the groups as + * LookupDiscovery.ALL_GROUPS (a <code>null</code>). + */ + static private locators = new LookupLocator[] { + + // runs jini on the localhost using unicast locators. + new LookupLocator("jini://localhost/") + + // runs jini on one or more hosts using unicast locators. + //new LookupLocator("jini://"+jini1), + //new LookupLocator("jini://"+jini2), + + }; + + /** + * A common point to set the Zookeeper client's requested + * sessionTimeout and the jini lease timeout. The default lease + * renewal period for jini is 5 minutes while for zookeeper it is + * more like 5 seconds. This puts the two systems onto a similar + * timeout period so that a disconnected client is more likely to + * be noticed in roughly the same period of time for either + * system. A value larger than the zookeeper default helps to + * prevent client disconnects under sustained heavy load. + * + * If you use a short lease timeout (LT 20s), then you need to override + * properties properties for the net.jini.lease.LeaseRenewalManager + * or it will run in a tight loop (it's default roundTripTime is 10s + * and it schedules lease renewals proactively.) + */ + + // jini + static private leaseTimeout = ConfigMath.s2ms(20); + + // zookeeper + static private sessionTimeout = (int)ConfigMath.s2ms(5); + + /* + * Configuration for default KB. + */ + + private static namespace = "kb"; + + private static kb = new NV[] { + + /* Setup for QUADS mode without the full text index. 
*/ + + new NV(BigdataSail.Options.TRUTH_MAINTENANCE, "false" ), + new NV(BigdataSail.Options.QUADS, "true"), + new NV(BigdataSail.Options.STATEMENT_IDENTIFIERS, "false"), + new NV(BigdataSail.Options.TEXT_INDEX, "false"), + new NV(BigdataSail.Options.AXIOMS_CLASS,"com.bigdata.rdf.axioms.NoAxioms"), + new NV(BigdataSail.Options.QUERY_TIME_EXPANDER, "false"), + + // Bump up the branching factor for the lexicon indices on the named kb. + // com.bigdata.namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor=400 + new NV(com.bigdata.config.Configuration.getOverrideProperty + ( namespace + "." + LexiconRelation.NAME_LEXICON_RELATION, + IndexMetadata.Options.BTREE_BRANCHING_FACTOR + ), "400"), + + // Bump up the branching factor for the statement indices on the named kb. + // com.bigdata.namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor=1024 + new NV(com.bigdata.config.Configuration.getOverrideProperty + ( namespace + "." + SPORelation.NAME_SPO_RELATION, + IndexMetadata.Options.BTREE_BRANCHING_FACTOR + ), "1024"), + }; + +} + +/* + * Zookeeper client configuration. + */ +org.apache.zookeeper.ZooKeeper { + + /* Root znode for the federation instance. */ + zroot = "/" + bigdata.fedname; + + /* A comma separated list of host:port pairs, where the port is + * the CLIENT port for the zookeeper server instance. + */ + // standalone. + servers = "localhost:2081"; + // ensemble +// servers = bigdata.zoo1+":2181" +// + ","+bigdata.zoo2+":2181" +// + ","+bigdata.zoo3+":2181" +// ; + + /* Session timeout (optional). */ + sessionTimeout = bigdata.sessionTimeout; + + /* + * ACL for the zookeeper nodes created by the bigdata federation. + * + * Note: zookeeper ACLs are not transmitted over secure channels + * and are placed into plain text Configuration files by the + * ServicesManagerServer. + */ + acl = new ACL[] { + + new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone")) + + }; + +} + +/* + * You should not have to edit below this line. + */ + +/* + * Jini client configuration. + */ +com.bigdata.service.jini.JiniClient { + + groups = bigdata.groups; + + locators = bigdata.locators; + + entries = new Entry[] { + + // Optional metadata entries. + + }; + +} + +net.jini.lookup.JoinManager { + + maxLeaseDuration = bigdata.leaseTimeout; + +} + +/* + * Server configuration options. + */ +com.bigdata.journal.jini.ha.HAJournalServer { + + serviceDir = bigdata.serviceDir; + + logicalServiceId = bigdata.logicalServiceId; + + writePipelineAddr = new InetSocketAddress("localhost",bigdata.haPort); + + /* + writePipelineAddr = new InetSocketAddress(// + InetAddress.getByName(// + NicUtil.getIpAddress("default.nic", "default", + false// loopbackOk + )), // + bigdata.haPort + ); + */ + + pipelineUUIDs = bigdata.pipeline; + + replicationFactor = bigdata.replicationFactor; + +} + +/* + * Journal configuration. + */ +com.bigdata.journal.jini.ha.HAJournal { + + properties = (NV[]) ConfigMath.concat(new NV[] { + + new NV(Options.FILE, + ConfigMath.getAbsolutePath(new File(bigdata.dataDir,"bigdata-ha.jnl"))), + + new NV(Options.BUFFER_MODE,""+BufferMode.DiskRW), + + new NV(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY,"4000"), + + new NV(IndexMetadata.Options.BTREE_BRANCHING_FACTOR,"128"), + + new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), + + new NV(HAJournal.Options.HA_LOG_DIR, ""+bigdata.logDir), + + }, bigdata.kb); + +} + +/* + * NanoSparqlServer configuration. 
+ */ +com.bigdata.rdf.sail.webapp.NanoSparqlServer { + + namespace = bigdata.namespace; + + create = true; + + queryThreadPoolSize = 16; + + describeEachNamedGraph = true; + + port = bigdata.nssPort; + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config 2012-10-24 13:17:54 UTC (rev 6687) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-B.config 2012-10-25 18:01:34 UTC (rev 6688) @@ -50,16 +50,7 @@ /* * This is a sample configuration file for a highly available Journal. A * version of this file must be available to each HAJournalServer in the - * pipeline. The pipeline depends on the stable assignment of ServiceID - * to HAJournalServers. A unique ServiceID must be explicitly assigned to - * each HAJournalServer in its configuration entry. The ordered list of - * those ServiceIDs is shared by all services and defines the write - * replication pipeline. The first entry in the write replication pipeline - * is the leader (aka master). You can use UUID.randomUUID() or GenUUID - * to create UUIDs. - * - * Note: The ServiceUUID Entry MUST be different for each file. It assigns - * a ServiceID to the service! + * pipeline. */ /* @@ -81,11 +72,8 @@ // The logical service identifier shared by all members of the quorum. private static logicalServiceId = "test-1"; - // The ServiceID for *this* service -or- null to assign it dynamically. - private static serviceId = UUID.fromString("a6120400-d63d-40d6-8ddb-3c283d0d5e3c"); - - // The service directory (if serviceId is null, then you must override). - private static serviceDir = new File(fedname,""+serviceId); + // The service directory. + private static serviceDir = new File(new File(fedname,logicalServiceId),"B"); // journal data directory. private static dataDir = serviceDir; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2012-10-24 13:17:54 UTC (rev 6687) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2012-10-25 18:01:34 UTC (rev 6688) @@ -50,16 +50,7 @@ /* * This is a sample configuration file for a highly available Journal. A * version of this file must be available to each HAJournalServer in the - * pipeline. The pipeline depends on the stable assignment of ServiceID - * to HAJournalServers. A unique ServiceID must be explicitly assigned to - * each HAJournalServer in its configuration entry. The ordered list of - * those ServiceIDs is shared by all services and defines the write - * replication pipeline. The first entry in the write replication pipeline - * is the leader (aka master). You can use UUID.randomUUID() or GenUUID - * to create UUIDs. - * - * Note: The ServiceUUID Entry MUST be different for each file. It assigns - * a ServiceID to the service! + * pipeline. */ /* @@ -81,12 +72,9 @@ // The logical service identifier shared by all members of the quorum. private static logicalServiceId = "test-1"; - // The ServiceID for *this* service -or- null to assign it dynamically. 
- private static serviceId = UUID.fromString("d609dcf7-860c-40f1-bd2f-eebdce20556c"); + // The service directory. + private static serviceDir = new File(new File(fedname,logicalServiceId),"C"); - // The service directory (if serviceId is null, then you must override). - private static serviceDir = new File(fedname,""+serviceId); - // journal data directory. private static dataDir = serviceDir; Deleted: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.config =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.config 2012-10-24 13:17:54 UTC (rev 6687) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.config 2012-10-25 18:01:34 UTC (rev 6688) @@ -1,307 +0,0 @@ -import net.jini.jeri.BasicILFactory; -import net.jini.jeri.BasicJeriExporter; -import net.jini.jeri.tcp.TcpServerEndpoint; - -import net.jini.discovery.LookupDiscovery; -import net.jini.core.discovery.LookupLocator; -import net.jini.core.entry.Entry; -import net.jini.lookup.entry.Name; -import net.jini.lookup.entry.Comment; -import net.jini.lookup.entry.Address; -import net.jini.lookup.entry.Location; -import net.jini.lookup.entry.ServiceInfo; -import net.jini.core.lookup.ServiceTemplate; - -import java.io.File; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.UUID; - -import com.bigdata.util.NV; -import com.bigdata.util.config.NicUtil; -import com.bigdata.journal.Options; -import com.bigdata.journal.BufferMode; -import com.bigdata.journal.jini.ha.HAJournal; -import com.bigdata.jini.lookup.entry.*; -import com.bigdata.service.IBigdataClient; -import com.bigdata.service.AbstractTransactionService; -import com.bigdata.service.jini.*; -import com.bigdata.service.jini.lookup.DataServiceFilter; -import com.bigdata.service.jini.master.ServicesTemplate; -import com.bigdata.jini.start.config.*; -import com.bigdata.jini.util.ConfigMath; - -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.data.ACL; -import org.apache.zookeeper.data.Id; - -// imports for various options. -import com.bigdata.btree.IndexMetadata; -import com.bigdata.btree.keys.KeyBuilder; -import com.bigdata.rdf.sail.BigdataSail; -import com.bigdata.rdf.spo.SPORelation; -import com.bigdata.rdf.spo.SPOKeyOrder; -import com.bigdata.rdf.lexicon.LexiconRelation; -import com.bigdata.rdf.lexicon.LexiconKeyOrder; -import com.bigdata.rawstore.Bytes; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeUnit.*; - -/* - * This is a sample configuration file for a highly available Journal. A - * version of this file must be available to each HAJournalServer in the - * pipeline. The pipeline depends on the stable assignment of ServiceID - * to HAJournalServers. A unique ServiceID must be explicitly assigned to - * each HAJournalServer in its configuration entry. The ordered list of - * those ServiceIDs is shared by all services and defines the write - * replication pipeline. The first entry in the write replication pipeline - * is the leader (aka master). You can use UUID.randomUUID() or GenUUID - * to create UUIDs. - * - * Note: The ServiceUUID Entry MUST be different for each file. It assigns - * a ServiceID to the service! - */ - -/* - * Globals. - */ -bigdata { - - private static fedname = "benchmark"; - - // NanoSparqlServer (http) port. - private static nssPort = 8090; - - // write replication pipeline port (listener). 
- private static haPort = 9090; - - // The #of services in the write pipeline. - private static replicationFactor = 3; - - // The logical service identifier shared by all members of the quorum. - private static logicalServiceId = "test-1"; - - // The ServiceID for *this* service -or- null to assign it dynamically. - private static serviceId = UUID.fromString("3c7e7639-78bf-452c-9ca9-2960caec17dc"); - - // The service directory (if serviceId is null, then you must override). - private static serviceDir = new File(fedname,""+serviceId); - - // journal data directory. - private static dataDir = serviceDir; - - // HA log directory. - private static logDir = new File(serviceDir,"HALog"); - - // one federation, multicast discovery. - //static private groups = LookupDiscovery.ALL_GROUPS; - - // unicast discovery or multiple setups, MUST specify groups. - static private groups = new String[]{bigdata.fedname}; - - /** - * One or more unicast URIs of the form <code>jini://host/</code> - * or <code>jini://host:port/</code> (no default). - * - * This MAY be an empty array if you want to use multicast - * discovery <strong>and</strong> you have specified the groups as - * LookupDiscovery.ALL_GROUPS (a <code>null</code>). - */ - static private locators = new LookupLocator[] { - - // runs jini on the localhost using unicast locators. - new LookupLocator("jini://localhost/") - - // runs jini on one or more hosts using unicast locators. - //new LookupLocator("jini://"+jini1), - //new LookupLocator("jini://"+jini2), - - }; - - /** - * A common point to set the Zookeeper client's requested - * sessionTimeout and the jini lease timeout. The default lease - * renewal period for jini is 5 minutes while for zookeeper it is - * more like 5 seconds. This puts the two systems onto a similar - * timeout period so that a disconnected client is more likely to - * be noticed in roughly the same period of time for either - * system. A value larger than the zookeeper default helps to - * prevent client disconnects under sustained heavy load. - * - * If you use a short lease timeout (LT 20s), then you need to override - * properties properties for the net.jini.lease.LeaseRenewalManager - * or it will run in a tight loop (it's default roundTripTime is 10s - * and it schedules lease renewals proactively.) - */ - - // jini - static private leaseTimeout = ConfigMath.s2ms(20); - - // zookeeper - static private sessionTimeout = (int)ConfigMath.s2ms(5); - - /* - * Configuration for default KB. - */ - - private static namespace = "kb"; - - private static kb = new NV[] { - - /* Setup for QUADS mode without the full text index. */ - - new NV(BigdataSail.Options.TRUTH_MAINTENANCE, "false" ), - new NV(BigdataSail.Options.QUADS, "true"), - new NV(BigdataSail.Options.STATEMENT_IDENTIFIERS, "false"), - new NV(BigdataSail.Options.TEXT_INDEX, "false"), - new NV(BigdataSail.Options.AXIOMS_CLASS,"com.bigdata.rdf.axioms.NoAxioms"), - new NV(BigdataSail.Options.QUERY_TIME_EXPANDER, "false"), - - // Bump up the branching factor for the lexicon indices on the named kb. - // com.bigdata.namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor=400 - new NV(com.bigdata.config.Configuration.getOverrideProperty - ( namespace + "." + LexiconRelation.NAME_LEXICON_RELATION, - IndexMetadata.Options.BTREE_BRANCHING_FACTOR - ), "400"), - - // Bump up the branching factor for the statement indices on the named kb. 
- // com.bigdata.namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor=1024 - new NV(com.bigdata.config.Configuration.getOverrideProperty - ( namespace + "." + SPORelation.NAME_SPO_RELATION, - IndexMetadata.Options.BTREE_BRANCHING_FACTOR - ), "1024"), - }; - -} - -/* - * Zookeeper client configuration. - */ -org.apache.zookeeper.ZooKeeper { - - /* Root znode for the federation instance. */ - zroot = "/" + bigdata.fedname; - - /* A comma separated list of host:port pairs, where the port is - * the CLIENT port for the zookeeper server instance. - */ - // standalone. - servers = "localhost:2081"; - // ensemble -// servers = bigdata.zoo1+":2181" -// + ","+bigdata.zoo2+":2181" -// + ","+bigdata.zoo3+":2181" -// ; - - /* Session timeout (optional). */ - sessionTimeout = bigdata.sessionTimeout; - - /* - * ACL for the zookeeper nodes created by the bigdata federation. - * - * Note: zookeeper ACLs are not transmitted over secure channels - * and are placed into plain text Configuration files by the - * ServicesManagerServer. - */ - acl = new ACL[] { - - new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone")) - - }; - -} - -/* - * You should not have to edit below this line. - */ - -/* - * Jini client configuration. - */ -com.bigdata.service.jini.JiniClient { - - groups = bigdata.groups; - - locators = bigdata.locators; - - entries = new Entry[] { - - // Optional metadata entries. - - }; - -} - -net.jini.lookup.JoinManager { - - maxLeaseDuration = bigdata.leaseTimeout; - -} - -/* - * Server configuration options. - */ -com.bigdata.journal.jini.ha.HAJournalServer { - - serviceDir = bigdata.serviceDir; - - logicalServiceId = bigdata.logicalServiceId; - - writePipelineAddr = new InetSocketAddress("localhost",bigdata.haPort); - - /* - writePipelineAddr = new InetSocketAddress(// - InetAddress.getByName(// - NicUtil.getIpAddress("default.nic", "default", - false// loopbackOk - )), // - bigdata.haPort - ); - */ - - pipelineUUIDs = bigdata.pipeline; - - replicationFactor = bigdata.replicationFactor; - -} - -/* - * Journal configuration. - */ -com.bigdata.journal.jini.ha.HAJournal { - - properties = (NV[]) ConfigMath.concat(new NV[] { - - new NV(Options.FILE, - ConfigMath.getAbsolutePath(new File(bigdata.dataDir,"bigdata-ha.jnl"))), - - new NV(Options.BUFFER_MODE,""+BufferMode.DiskRW), - - new NV(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY,"4000"), - - new NV(IndexMetadata.Options.BTREE_BRANCHING_FACTOR,"128"), - - new NV(AbstractTransactionService.Options.MIN_RELEASE_AGE,"1"), - - new NV(HAJournal.Options.HA_LOG_DIR, ""+bigdata.logDir), - - }, bigdata.kb); - -} - -/* - * NanoSparqlServer configuration. - */ -com.bigdata.rdf.sail.webapp.NanoSparqlServer { - - namespace = bigdata.namespace; - - create = true; - - queryThreadPoolSize = 16; - - describeEachNamedGraph = true; - - port = bigdata.nssPort; - -} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-24 13:17:54 UTC (rev 6687) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-25 18:01:34 UTC (rev 6688) @@ -513,11 +513,14 @@ */ final IHALogReader r = getHALogWriter().getReader(commitCounter); + // Task sends an HALog file along the pipeline. final FutureTask<Void> ft = new FutureTaskMon<Void>( new SendHALogTask(req, r)); + // Run task. 
getExecutorService().submit(ft); + // Return *ASYNCHRONOUS* proxy (interruptable). return getProxy(ft, true/* asynch */); } @@ -552,8 +555,7 @@ final IHABufferStrategy strategy = (IHABufferStrategy) HAJournal.this .getBufferStrategy(); - // get message and write cache buffer. - // the buffer will be ignored if it is a WORM strategy + // get message and fill write cache buffer (unless WORM). final IHAWriteMessage msg = r.processNextBuffer(buf.buffer()); if (haLog.isDebugEnabled()) Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-24 13:17:54 UTC (rev 6687) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-25 18:01:34 UTC (rev 6688) @@ -21,6 +21,7 @@ import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -48,6 +49,7 @@ import com.bigdata.ha.halog.HALogWriter; import com.bigdata.ha.msg.HALogRequest; import com.bigdata.ha.msg.HALogRootBlocksRequest; +import com.bigdata.ha.msg.HARootBlockRequest; import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHALogRootBlocksResponse; import com.bigdata.ha.msg.IHAWriteMessage; @@ -94,8 +96,9 @@ /** * Configuration options for the {@link HAJournalServer}. */ - public interface ConfigurationOptions { - + public interface ConfigurationOptions extends + AbstractServer.ConfigurationOptions { + String COMPONENT = HAJournalServer.class.getName(); /** @@ -187,6 +190,22 @@ */ private Quorum<HAGlue, QuorumService<HAGlue>> quorum; +// /** +// * Class models the invariants for an attempt to join a met quorum. +// */ +// private static class SyncState { +// private final IRootBlockView expected; +// private final long quorumToken; +// private long nextBlockSeq; +// private boolean didCommit; +// +// public SyncState(final IRootBlockView expected, final long quorumToken, +// final long nextBlockSeq, final boolean didCommit) { +// this.expected = expected; +// } +// } +// private final Lock syncLock = new ReentrantLock(); + /** * An embedded jetty server exposing the {@link NanoSparqlServer} webapp. * The {@link NanoSparqlServer} webapp exposes a SPARQL endpoint for the @@ -947,24 +966,25 @@ * atomically can log from the write pipeline rather than replicating a * logged write. At that point, the service can vote its lastCommitTime * and will join the met quorum. - * - * TODO RESYNC : In fact, the service can always begin logging from the write + * <p> + * Note: In fact, the service can always begin logging from the write * pipeline as soon as it observes (or acquires) the root block and * observes the seq=0 write cache block (or otherwise catches up with * the write pipeline). However, it can not write those data onto the * local journal until it is fully caught up. This optimization might * allow the service to catch up slightly faster, but the code would be * a bit more complex. 
- * - * FIXME RESYNC : Examine how the service joins during the 2-phase - * commit and how it participates during the 2-phase prepare (and - * whether it needs to participate - if not, then rollback some of the - * changes to support non-joined services in the 2-phase commit to - * simplify the code). */ private class ResyncTask implements Callable<Void> { + /** + * The quorum token in effect when we began the resync. + */ private final long token; + /** + * The quorum leader. This is fixed until the quorum breaks or the + * resync ends. + */ private final S leader; public ResyncTask() { @@ -1023,20 +1043,24 @@ haLog.warn("RESYNCH: " + server.getServiceName()); - /* - * Note: We need to discard any writes that might have been - * buffered before we start the resynchronization of the local - * store. - * - * TODO This might not be necessary. We do a low-level abort - * when we install the root blocks from the quorum leader before - * we sync the first commit point. - */ +// /* +// * Note: We need to discard any writes that might have been +// * buffered before we start the resynchronization of the local +// * store. +// * +// * TODO This might not be necessary. We do a low-level abort +// * when we install the root blocks from the quorum leader before +// * we sync the first commit point. +// */ +// +// journal.doLocalAbort(); - journal.doLocalAbort(); - - while (true) { + // Until joined with the met quorum. + while (!getQuorum().getMember().isJoinedMember(token)) { + // Abort if the quorum breaks. + getQuorum().assertQuorum(token); + // The current commit point on the local store. final long commitCounter = journal.getRootBlockView() .getCommitCounter(); @@ -1054,21 +1078,23 @@ * eventually going through a local commit when we receiving the * closing {@link IRootBlockView} for that write set. * - * @param commitCounter - * The commit counter for the desired write set. + * @param closingCommitCounter + * ... [truncated message content] |
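The prepare/commit restructuring in the diffs above is spread across several hunks, so here is a compact, self-contained sketch of the control flow it implements: the vote is recorded when the PREPARE message is handled and consulted when the COMMIT message arrives, so a service that voted NO does no work at commit time. The names TwoPhaseParticipant, prepare, commit and doLocalCommit are placeholders, not the bigdata API, and the single precondition check stands in for the root block UUID / commit time / commit counter checks performed by Prepare2PhaseTask.

import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Sketch of the vote-at-prepare / check-at-commit pattern.
 */
public class TwoPhaseParticipant {

    /** Did this service vote YES for the most recent prepare request? */
    private final AtomicBoolean vote = new AtomicBoolean(false);

    /** Handle PREPARE: vote NO unless joined and all preconditions hold. */
    public FutureTask<Boolean> prepare(final boolean isJoined,
            final long newCommitCounter, final long localCommitCounter) {

        // Assume NO unless proven otherwise.
        vote.set(false);

        return new FutureTask<Boolean>(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                if (!isJoined) {
                    // Not joined with the met quorum: unconditional NO vote.
                    return vote.get();
                }
                // Precondition: the new commit point must advance our own.
                if (newCommitCounter <= localCommitCounter) {
                    throw new IllegalStateException("Commit counter does not advance");
                }
                // ... force application data to stable storage here ...
                vote.set(true); // Vote YES.
                return vote.get();
            }
        });
    }

    /** Handle COMMIT: a NOP unless this service voted YES at prepare. */
    public FutureTask<Void> commit(final Runnable doLocalCommit) {

        return new FutureTask<Void>(new Runnable() {
            public void run() {
                if (!vote.get()) {
                    // This service voted NO; it does not participate.
                    return;
                }
                try {
                    // Apply the new root block locally, log it, etc.
                    doLocalCommit.run();
                } finally {
                    // Reset the vote for the next round of the protocol.
                    vote.set(false);
                }
            }
        }, null/* Void */);
    }
}

As in AbstractJournal, the returned FutureTask would be run in the caller's thread and a proxy for it handed back to the leader; the essential point is simply that commit2Phase() becomes a NOP for any service whose prepare vote was NO.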
From: <tho...@us...> - 2012-10-30 18:03:47
|
Revision: 6693 http://bigdata.svn.sourceforge.net/bigdata/?rev=6693&view=rev
Author: thompsonbry
Date: 2012-10-30 18:03:37 +0000 (Tue, 30 Oct 2012)

Log Message:
-----------
Working through the resynchronization protocol with Martyn for the HA Journal. At this point, a quiescent resynchronization of a service that had not yet started can be reliably performed from the HALogs on the other services. Problems with the transition to a joined service have been observed when resynchronizing under a sustained write load. Problems have also been observed when a REBUILD is falsely initiated after some HALogs have been purged even though the necessary HALogs are still available. HALogs are still disabled in HAJournalServer.

- done. A pipelineChange() event on (B) can fail to establish (C) as the downstream for the receive/send service. The problem appeared to be that the HASendService.terminate() method was NOT returning, so we were blocked waiting for that tear down and replication was not set up for (C). In fact, the sendService was null (B is not the leader, so the receiveService is running, not the sendService); the real problem was the timing of when we bring up the sendService. It used to be done in HAReceiveService.changeDownStream(addrNext), but it is commented out there. The root of the HASend/Receive service problem is that the addrNext field was final in the ReadTask and its Client reference: a pipeline change would update addrNext, but the new value would not be visible inside of ReadState/Client. The remaining problem occurs when the upstream service changes. The HAReceiveService.Client class has a client socket connection for the upstream service, and that connection needs to be torn down and a new one established when the upstream service changes. We currently handle downstream changes; code was added that calls through for upstream changes as well, i.e., an upstreamChange() method hooked into QuorumPipelineImpl and HAReceiveService. This was integrated into the quorum code, which now invokes the new QuorumPipeline.pipelineUpstreamChange() method when the upstream service has left the pipeline.

- Refactored the retrySend() logic, moving it into QuorumPipelineImpl. This logic is now applied to both live write cache replication and historical write cache replication (HALogs). Modified HAJournalServer to ignore a duplicate message in the special case where we have just received the same message. Duplicate messages can occur through the normal functioning of retrySend().
@see https://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorumMember.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/quorum/QuorumStateChangeListener.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/quorum/QuorumStateChangeListenerBase.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java 2012-10-30 11:17:27 UTC (rev 6692) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java 2012-10-30 18:03:37 UTC (rev 6693) @@ -37,6 +37,7 @@ import com.bigdata.io.writecache.WriteCache; import com.bigdata.journal.IRootBlockView; import com.bigdata.quorum.Quorum; +import com.bigdata.quorum.QuorumMember; /** * A non-remote interface for a member service in a {@link Quorum} defining @@ -53,6 +54,12 @@ * leader. The payload is replicated to the first follower in the write * pipeline. That follower will accept the payload (and replicate it if * necessary) using {@link #receiveAndReplicate(IHAWriteMessage)}. + * <p> + * Note: The implementation of this method should be robust to changes in + * the write pipeline. Specifically, if a follower leaves the write + * pipeline, it should attempt to retransmit the message and the payload + * while allowing time for the write pipeline to be reconfigured in response + * to the related {@link QuorumMember} events. * * @param req * A request for an HALog (optional). This is only non-null when @@ -61,7 +68,9 @@ * @param msg * The RMI metadata about the payload. * @param b - * The payload. + * The payload. The bytes from the position to the limit will be + * transmitted (note that the #of bytes remaining in the buffer + * MUST agree with {@link IHAWriteMessage#getSize()}). 
*/ Future<Void> replicate(IHALogRequest req, IHAWriteMessage msg, ByteBuffer b) throws IOException; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2012-10-30 11:17:27 UTC (rev 6692) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2012-10-30 18:03:37 UTC (rev 6693) @@ -52,10 +52,12 @@ import com.bigdata.ha.pipeline.HASendService; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.IBufferAccess; +import com.bigdata.io.writecache.WriteCache; import com.bigdata.quorum.QuorumException; import com.bigdata.quorum.QuorumMember; import com.bigdata.quorum.QuorumStateChangeListener; import com.bigdata.quorum.QuorumStateChangeListenerBase; +import com.bigdata.util.InnerCause; /** * {@link QuorumPipeline} implementation. @@ -413,26 +415,58 @@ if (log.isInfoEnabled()) log.info("oldDownStreamId=" + oldDownStreamId + ",newDownStreamId=" + newDownStreamId + ", addrNext=" - + addrNext); + + addrNext + ", sendService=" + sendService + + ", receiveService=" + receiveService); if (sendService != null) { - // Terminate the existing connection. + /* + * Terminate the existing connection (we were the first service + * in the pipeline). + */ sendService.terminate(); if (addrNext != null) { + if (log.isDebugEnabled()) + log.debug("sendService.start(): addrNext=" + addrNext); sendService.start(addrNext); } } else if (receiveService != null) { /* - * Reconfigure the receive service to change how it is relaying. + * Reconfigure the receive service to change how it is relaying + * (we were relaying, so the receiveService was running but not + * the sendService). */ + if (log.isDebugEnabled()) + log.debug("receiveService.changeDownStream(): addrNext=" + + addrNext); receiveService.changeDownStream(addrNext); } // populate and/or clear the cache. cachePipelineState(newDownStreamId); + if (log.isDebugEnabled()) + log.debug("pipelineChange - done."); } finally { lock.unlock(); } } + @Override + public void pipelineUpstreamChange() { + super.pipelineUpstreamChange(); + lock.lock(); + try { + if (receiveService != null) { + /* + * Make sure that the receiveService closes out its client + * connection with the old upstream service. + */ + if (log.isInfoEnabled()) + log.info("receiveService=" + receiveService); + receiveService.changeUpStream(); + } + } finally { + lock.unlock(); + } + } + /** * Request the {@link InetSocketAddress} of the write pipeline for a service * (RMI). @@ -694,37 +728,283 @@ lock.lock(); try { - if (log.isTraceEnabled()) - log.trace("Leader will send: " + b.remaining() + " bytes"); + ft = new FutureTask<Void>(new RobustReplicateTask(req, msg, b)); - // Note: disable assert if we allow non-leaders to replicate HALog - // messages (or just verify service joined with the quorum). + } finally { + + lock.unlock(); + + } + + // Submit Future for execution (outside of the lock). + member.getExecutor().execute(ft); + + // Return Future. Caller must wait on the Future. + return ft; + + } + + /** + * Task robustly replicates an {@link IHAWriteMessage} and the associated + * payload. + */ + private class RobustReplicateTask implements Callable<Void> { + + /** + * An historical message is indicated when the {@link IHALogRequest} is + * non-<code>null</code>. + */ + private final IHALogRequest req; + + /** + * The {@link IHAWriteMessage}. 
+ */ + private final IHAWriteMessage msg; + + /** + * The associated payload. + */ + private final ByteBuffer b; + + /** + * The token for the leader. + */ + private final long quorumToken; + + /** + * The #of times the leader in a highly available quorum will attempt to + * retransmit the current write cache block if there is an error when + * sending that write cache block to the downstream node. + */ + static protected final int RETRY_COUNT = 3; + + /** + * The timeout for a sleep before the next retry. This timeout is designed + * to allow some asynchronous processes to reconnect the + * {@link HASendService} and the {@link HAReceiveService}s in write pipeline + * such that a retransmit can succeed after a service has left the pipeline. + */ + static protected final int RETRY_SLEEP = 50; + + public RobustReplicateTask(final IHALogRequest req, final IHAWriteMessage msg, + final ByteBuffer b) { + + // Note: [req] MAY be null. + + if (msg == null) + throw new IllegalArgumentException(); + + if (b == null) + throw new IllegalArgumentException(); + + this.req = req; + + this.msg = msg; + + this.b = b; + + if (b.remaining() == 0) { + + // Empty buffer. + + throw new IllegalStateException("Empty buffer: req=" + req + + ", msg=" + msg + ", buffer=" + b); + + } + if (req == null) { - // Note: Do not test quorum token for historical writes. - member.assertLeader(msg.getQuorumToken()); + + /* + * Live message. + * + * Use the quorum token on the message. It was put there by the + * WriteCacheService. This allows us to ensure that the qourum + * token remains valid for all messages replicated by the + * leader. + */ + + quorumToken = msg.getQuorumToken(); + + } else { + + /* + * Historical message. + */ + + // Use the current quorum token. + quorumToken = member.getQuorum().token(); + } + + } + + public Void call() throws Exception { - final PipelineState<S> downstream = pipelineStateRef.get(); + try { - final HASendService sendService = getHASendService(); + innerReplicate(0/* retryCount */); + + } catch (Throwable t) { - ft = new FutureTask<Void>(new SendBufferTask<S>(req, msg, b, - downstream, sendService, sendLock)); + if (InnerCause.isInnerCause(t, InterruptedException.class)) { + + throw (InterruptedException) t; + + } + + // Log initial error. + log.error(t, t); - } finally { + if (!retrySend()) { - lock.unlock(); + // Rethrow the original exception. + throw new RuntimeException( + "Giving up. Could not send after " + RETRY_COUNT + + " attempts : " + t, t); + } + + } + + return null; + } - // execute the FutureTask. - member.getExecutor().execute(ft); + /** + * Replicate from the leader to the first follower. Each non-final + * follower will receiveAndReplicate the write cache buffer. The last + * follower will receive the buffer. + * + * @param retryCount + * The #of attempts and ZERO (0) if this is the first + * attempt. + * + * @throws Exception + */ + private void innerReplicate(final int retryCount) throws Exception { - return ft; + lock.lockInterruptibly(); - } + try { + if (log.isTraceEnabled()) + log.trace("Leader will send: " + b.remaining() + + " bytes, retryCount=" + retryCount + ", req=" + + req + ", msg=" + msg); + + /* + * Note: disable assert if we allow non-leaders to replicate + * HALog messages (or just verify service joined with the + * quorum). + */ + + if (req == null) { + + // // Note: Do not test quorum token for historical writes. + // member.assertLeader(msg.getQuorumToken()); + + /* + * This service must be the leader. 
+ * + * Note: The [quorumToken] is from the message IFF this is a + * live message and is otherwise the current quorum token. + */ + member.assertLeader(quorumToken); + + } + + final PipelineState<S> downstream = pipelineStateRef.get(); + + final HASendService sendService = getHASendService(); + + final ByteBuffer b = this.b.duplicate(); + + new SendBufferTask<S>(req, msg, b, downstream, sendService, + sendLock).call(); + + return; + + } finally { + + lock.unlock(); + + } + + } + + /** + * Robust retransmit of the current cache block. This method is designed to + * handle several kinds of recoverable errors, including: + * <ul> + * <li>downstream service leaves the pipeline</li> + * <li>intermittent failure sending the RMI message</li> + * <li>intermittent failure sending the payload</li> + * </ul> + * The basic pattern is that it will retry the operation a few times to see + * if there is a repeatable error. Each time it attempts the operation it + * will discover the current downstream serviceId and verify that the quorum + * is still valid. Each error (including the first) is logged. If the + * operation fails, the original error is rethrown. If the operation + * succeeds, then the cache block was successfully transmitted to the + * current downstream service and this method returns without error. + * + * @throws InterruptedException + */ + private boolean retrySend() throws InterruptedException { + + // we already tried once. + int tryCount = 1; + + // now try some more times. + for (; tryCount < RETRY_COUNT; tryCount++) { + + // Sleep before each retry (including the first). + Thread.sleep(RETRY_SLEEP/* ms */); + + try { + + // send to 1st follower. + innerReplicate(tryCount); + + // Success. + return true; + + } catch (Exception ex) { + + // log and retry. + log.error("retry=" + tryCount + " : " + ex, ex); + + continue; + + } + + } + + // Send was not successful. + return false; + + } // retrySend() + + } // class RobustReplicateTask + /** + * The logic needs to support the asynchronous termination of the + * {@link Future} that is responsible for replicating the {@link WriteCache} + * block, which is why the API exposes the means to inform the caller about + * that {@link Future}. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + public interface IRetrySendCallback { + /** + * + * @param remoteFuture + */ + void notifyRemoteFuture(final Future<Void> remoteFuture); + } + + /** * Task to send() a buffer to the follower. */ static private class SendBufferTask<S extends HAPipelineGlue> implements Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java 2012-10-30 11:17:27 UTC (rev 6692) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java 2012-10-30 18:03:37 UTC (rev 6693) @@ -8,15 +8,24 @@ Greensboro, NC 27410 lic...@bi... -This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. -You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ package com.bigdata.ha.pipeline; import java.io.IOException; +import java.net.BindException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.nio.channels.SelectionKey; @@ -40,6 +49,7 @@ import org.apache.log4j.Logger; +import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.ha.msg.IHAWriteMessageBase; import com.bigdata.ha.pipeline.HASendService.IncSendTask; import com.bigdata.io.writecache.WriteCache; @@ -83,7 +93,7 @@ * always allocated, but it will be running iff this service will relay the * data to a downstream service. */ - private final HASendService downstream; + private final HASendService sendService; private final ExecutorService executor = Executors.newSingleThreadExecutor(); @@ -113,13 +123,72 @@ /* * The lock and the things which it guards. */ + + /** + * The {@link Lock}. + */ private final Lock lock = new ReentrantLock(); - private final Condition futureReady = lock.newCondition(); + + /** + * {@link Condition} signaled when the {@link #waitFuture} is ready. + * {@link #receiveData(IHAWriteMessageBase, ByteBuffer)} awaits this + * {@link Condition}. Once signaled, it returns the {@link #waitFuture} to + * the caller and clears {@link #waitFuture} to <code>null</code>. + * <p> + * The {@link Condition}s {@link #messageReady} and {@link #futureRead} + * respectively manage the hand off of the message (to the {@link ReadTask}) + * and the {@link #waitFuture} (to the thread calling + * {@link #receiveData(IHAWriteMessageBase, ByteBuffer)}. + */ + private final Condition futureReady = lock.newCondition(); + + /** + * {@link Condition} signaled when a new {@link IHAWriteMessage} has been + * set on {@link #message} by + * {@link #receiveData(IHAWriteMessageBase, ByteBuffer)}. + */ private final Condition messageReady = lock.newCondition(); + + /** + * {@link RunState} for the {@link HAReceiveService}. This is used to manage + * startup and termination state transitions. + */ private RunState runState = RunState.Start; - private IHAWriteMessageBase message; + + /** + * The current {@link IHAWriteMessageBase}. This message provides metadata + * about the expected buffer transfer. This field is set by + * {@link #receiveData(IHAWriteMessageBase, ByteBuffer)}. + */ + private M message; + + /** + * The current receive buffer. This buffer is populated with data based on + * the expected {@link IHAWriteMessage#getSize()}. The data is verified by + * comparing the checksum of the buffer to the expected checksum as + * specified by {@link IHAWriteMessage#getChk()}. 
+ */ private ByteBuffer localBuffer; + + /** + * {@link Future} for the current buffer transfer used to await the + * termination of that transfer by the {@link ReadTask}. + * <p> + * Note: The {@link #readFuture} is cleared to <code>null</code> as soon as + * the buffer transfer is complete. + */ private FutureTask<Void> readFuture; + + /** + * {@link Future} for the current buffer transfer used to await the + * termination of that transfer by the thread that calls + * {@link #receiveData(IHAWriteMessageBase, ByteBuffer)}. + * <p> + * Note: The {@link #waitFuture} is cleared to <code>null</code> as soon as + * it is returned to the caller. This can occur before the buffer transfer + * is complete. Therefore, {@link ReadTask} MUST NOT wait on the + * {@link #waitFuture}. + */ private FutureTask<Void> waitFuture; /** @@ -127,21 +196,47 @@ * transfer will be relayed as it is received (optional and may be * <code>null</code>). * <p> - * Note: This is volatile for visibility in {@link #toString()}, which does - * not obtain the {@link #lock}. + * Note: This an {@link AtomicReference} for visibility in + * {@link #toString()}, which does not obtain the {@link #lock}. The + * {@link AtomicReference} also make changes in the downstream service + * address visible inside of {@link ReadTask}. */ - private volatile InetSocketAddress addrNext; + private final AtomicReference<InetSocketAddress> addrNextRef; + /* + * Note: toString() implementation is non-blocking. + */ public String toString() { return super.toString() + "{addrSelf=" + addrSelf + ", addrNext=" - + addrNext + "}"; + + addrNextRef.get() + "}"; } + /** The Internet socket address at which this service will listen (immutable) */ + public InetSocketAddress getAddrSelf() { + + return addrSelf; + + } + /** + * The Internet socket address to which this service will relay messages + * (dynamic and MAY be <code>null</code>). + * + * @see #changeDownStream(InetSocketAddress) + */ + public InetSocketAddress getAddrNext() { + + return addrNextRef.get(); + + } + + /** * Create a new service instance - you MUST {@link Thread#start()} the * service. + * <p> + * Note: <i>addrNext</i> can be changed dynamically. * * @param addrSelf * The Internet socket address at which this service will listen. @@ -149,6 +244,8 @@ * The Internet socket address of a downstream service to which * each data transfer will be relayed as it is received * (optional). + * + * @see #changeDownStream(InetSocketAddress) */ public HAReceiveService(final InetSocketAddress addrSelf, final InetSocketAddress addrNext) { @@ -173,21 +270,26 @@ public HAReceiveService(final InetSocketAddress addrSelf, final InetSocketAddress addrNext, final IHAReceiveCallback<M> callback) { - + if (addrSelf == null) - throw new IllegalArgumentException(); + throw new IllegalArgumentException(); this.addrSelf = addrSelf; - this.addrNext = addrNext; + this.addrNextRef = new AtomicReference<InetSocketAddress>(addrNext); this.callback = callback; // Note: Always allocate since the addrNext can change. - this.downstream = new HASendService(); + this.sendService = new HASendService(); + // Thread will not prevent JVM exit. setDaemon(true); + // Give the thread a useful name. 
+ setName(HAReceiveService.class.getName() + "@" + hashCode() + + "{addrSelf=" + addrSelf + "}"); + if (log.isInfoEnabled()) log.info("Created: " + this); @@ -225,8 +327,8 @@ lock.unlock(); } - if (downstream != null) - downstream.terminate(); + if (sendService != null) + sendService.terminate(); executor.shutdownNow(); @@ -266,22 +368,22 @@ } public void start() { - super.start(); - lock.lock(); - try { + super.start(); + lock.lock(); + try { // Wait for state change from Start while (runState == RunState.Start) { - try { - futureReady.await(); - } catch (InterruptedException e) { - // let's go around again - } + try { + futureReady.await(); + } catch (InterruptedException e) { + // let's go around again + } } } finally { lock.unlock(); } } - + public void run() { lock.lock(); try { @@ -298,7 +400,30 @@ * Open a non-blocking server socket channel and start listening. */ server = ServerSocketChannel.open(); - server.socket().bind(addrSelf); + { + /* + * Robustly attempt to bind the address and port where this + * service will listen. + * + * Note: The retry is here because the port is not freed up + * immediately when we close the existing socket connection + */ + boolean didBind = false; + for (int i = 0; i < 3; i++) { + try { + server.socket().bind(addrSelf); + didBind = true; + break; + } catch (BindException ex) { + log.warn("Sleeping to retry: " + ex); + Thread.sleep(100/* ms */); + continue; + } + } + if (!didBind) { + server.socket().bind(addrSelf); + } + } server.configureBlocking(false); if(log.isInfoEnabled()) log.info("Listening on: " + addrSelf); @@ -331,11 +456,21 @@ } /** - * Loops accepting requests and scheduling readTasks. Note that a local - * caller must hand us a buffer and {@link IHAWriteMessageBase} using - * {@link #receiveData(IHAWriteMessageBase, ByteBuffer)} before we will accept - * data on the {@link SocketChannel}. - * + * The client socket connection that was obtained when we accepted the + * upstream reader. + * <p> + * Note: The {@link Client} connection is reused across {@link ReadTask}s. + * <p> + * Note: Exposed to {@link #changeUpStream()}. + */ + private final AtomicReference<Client> clientRef = new AtomicReference<Client>(null); + + /** + * Loops accepting requests and scheduling {@link ReadTask}s. Note that a + * local caller must hand us a buffer and {@link IHAWriteMessageBase} using + * {@link #receiveData(IHAWriteMessageBase, ByteBuffer)} before we will + * accept data on the {@link SocketChannel}. + * * @throws IOException * @throws ExecutionException * @throws InterruptedException @@ -343,8 +478,6 @@ private void runNoBlock(final ServerSocketChannel server) throws IOException, InterruptedException, ExecutionException { - final AtomicReference<Client> clientRef = new AtomicReference<Client>(); - try { while (true) { @@ -369,12 +502,16 @@ messageReady.await(); } - // setup task. - waitFuture = new FutureTask<Void>(new ReadTask(server, clientRef, - message, localBuffer, downstream, addrNext, callback)); - readFuture = waitFuture; - message = null; - + // Setup task to read buffer for that message. + readFuture = waitFuture = new FutureTask<Void>( + new ReadTask<M>(server, clientRef, message, + localBuffer, sendService, addrNextRef, + callback)); + + // Message cleared once ReadTask started. + message = null; + + // [waitFuture] is available for receiveData(). 
futureReady.signal(); } finally { @@ -398,18 +535,22 @@ * Note: We might have to wait for the Future to avoid having * more than one ReadTask at a time, but we should log and * ignore any exception and restart the loop. + * + * The loop needs to keep running. The thread that called + * receiveData() will return the [waitFuture] and will notice + * any exception through that Future. */ try { - readFuture.get(); + readFuture.get(); } catch (Exception e) { - log.warn(e,e); + log.warn(e, e); } - + lock.lockInterruptibly(); try { - readFuture = null; + readFuture = null; } finally { - lock.unlock(); + lock.unlock(); } } // while(true) @@ -425,31 +566,37 @@ /** * Class encapsulates the connection state for the socket channel used to - * read on the upstream client. + * receive from on the upstream {@link HASendService}. * * @author <a href="mailto:tho...@us...">Bryan * Thompson</a> - * @version $Id$ */ static private class Client { - final SocketChannel client; - final Selector clientSelector; - final SelectionKey clientKey; + private final SocketChannel client; + private final Selector clientSelector; + private final SelectionKey clientKey; - final HASendService downstream; +// /** Used to replicate the message to the downstream service (if any). */ +// private final HASendService downstream; /** * Gets the client connection and open the channel in a non-blocking * mode so we will read whatever is available and loop until all data * has been read. */ - public Client(final ServerSocketChannel server, - final HASendService downstream, final InetSocketAddress addrNext) - throws IOException { + public Client(// + final ServerSocketChannel server // +// , final HASendService downstream // +// , final InetSocketAddress addrNext// + ) throws IOException { try { + /* + * Note: This binds a port for a specific upstream HASendService + * that will be talking to this HAReceiveService. + */ client = server.accept(); client.configureBlocking(false); @@ -459,15 +606,15 @@ clientKey = client.register(clientSelector, SelectionKey.OP_READ); - this.downstream = downstream; - - // Prepare downstream (if any) for incremental transfers - if (addrNext != null) { +// this.downstream = downstream; +// +// // Prepare downstream (if any) for incremental transfers +// if (addrNext != null) { +// +// downstream.start(addrNext); +// +// } - downstream.start(addrNext); - - } - } catch (IOException ex) { close(); @@ -483,13 +630,13 @@ try { client.close(); } finally { - try { +// try { clientSelector.close(); - } finally { - if (downstream != null) { - downstream.terminate(); - } - } +// } finally { +// if (downstream != null) { +// downstream.terminate(); +// } +// } } } @@ -504,9 +651,7 @@ * * @author <a href="mailto:tho...@us...">Bryan * Thompson</a> - * @version $Id: HAReceiveService.java 2826 2010-05-17 11:46:23Z - * martyncutcher $ - * + * * @todo report counters * <p> * report the #of chunks per payload so we can decide if the private @@ -528,13 +673,13 @@ /** * Used to transfer received data to the downstream service (if any). */ - private final HASendService downstream; + private final HASendService sendService; /** * The address of the downstream service -or- <code>null</code> iff * there is no downstream service. */ - private final InetSocketAddress addrNext; + private final AtomicReference<InetSocketAddress> addrNextRef; /** * Optional callback. @@ -567,16 +712,20 @@ * The {@link HASendService} used to relay data to the * downstream node. 
* @param addrNext - * The address of the downstream node (optional and - * <code>null</code> if this is the last node in the relay - * chain). + * An {@link AtomicReference} for address of the downstream + * node. The value within that {@link AtomicReference} may be + * updated by + * {@link HAReceiveService#changeDownStream(InetSocketAddress)} + * . That value will be <code>null</code> if this is the last + * node in the write pipeline at the time the value is + * observed. * @param callback * An optional callback. */ public ReadTask(final ServerSocketChannel server, final AtomicReference<Client> clientRef, final M message, final ByteBuffer localBuffer, final HASendService downstream, - final InetSocketAddress addrNext, + final AtomicReference<InetSocketAddress> addrNextRef, final IHAReceiveCallback<M> callback) { if (server == null) @@ -594,8 +743,8 @@ this.clientRef = clientRef; this.message = message; this.localBuffer = localBuffer; - this.downstream = downstream; - this.addrNext = addrNext; + this.sendService = downstream; + this.addrNextRef = addrNextRef; this.callback = callback; } @@ -697,13 +846,41 @@ // SelectionKey.OP_READ); Client client = clientRef.get(); + +// if (client != null) { +// +// /* +// * Note: We need to know when the client connection is no longer +// * valid. The code here does not appear to do the trick. +// * changeUpStream() is handling this instead. +// * +// * We need to decide whether the client is no longer valid +// * (either because the upstream HASendService has changed (our +// * predecessor in the pipeline might have died) or because it +// * has closed is socket connection to this HAReceiveService). +// * +// * Either way, we need to establish a client connection using +// * awaitAccept(). +// */ +// if (!client.client.isConnected()) { +// log.warn("Closing old client connection."); +// clientRef.set(client = null); +// } +// +// } + if (client == null) { + /* + * Accept and the initialize a connection from the upstream + * HASendService. + */ + // Accept a client connection (blocks) awaitAccept(); // New client connection. - client = new Client(server, downstream, addrNext); + client = new Client(server);//, sendService, addrNext); // save off reference. clientRef.set(client); @@ -715,36 +892,56 @@ * begin transferring data from the stream to the writeCache. */ final long begin = System.currentTimeMillis(); - int rem = message.getSize(); // #of bytes remaining (to be received). - while (rem > 0) { + // #of bytes remaining (to be received). + int rem = message.getSize(); + + // End of stream flag. + boolean EOS = false; + + while (rem > 0 && !EOS) { + // block up to the timeout. final int nkeys = client.clientSelector.select(10000/* ms */); + if (nkeys == 0) { + // Nothing available. final long elapsed = System.currentTimeMillis() - begin; + if (elapsed > 10000) { // Issue warning if we have been blocked for a while. log.warn("Blocked: awaiting " + rem + " out of " + message.getSize() + " bytes."); } + } + final Set<SelectionKey> keys = client.clientSelector .selectedKeys(); + final Iterator<SelectionKey> iter = keys.iterator(); + while (iter.hasNext()) { + iter.next(); iter.remove(); final int rdlen = client.client.read(localBuffer); + if (log.isTraceEnabled()) - log.trace("Read " + rdlen + " bytes of " + (rdlen > 0 ? rem - rdlen : rem) + " bytes remaining."); + log.trace("Read " + rdlen + " bytes of " + + (rdlen > 0 ? 
rem - rdlen : rem) + + " bytes remaining."); if (rdlen > 0) updateChk(rdlen); - if (rdlen == -1) + if (rdlen == -1) { + // The stream is closed? + EOS = true; break; + } rem -= rdlen; @@ -766,25 +963,42 @@ * The rdlen is checked for non zero to avoid an * IllegalArgumentException. */ + // dereference. + final InetSocketAddress addrNext = addrNextRef.get(); if (rdlen != 0 && addrNext != null) { if (log.isTraceEnabled()) - log - .trace("Incremental send of " + rdlen - + " bytes"); + log.trace("Incremental send of " + rdlen + " bytes"); final ByteBuffer out = localBuffer.asReadOnlyBuffer(); out.position(localBuffer.position() - rdlen); out.limit(localBuffer.position()); + synchronized (sendService) { + /* + * Note: Code block is synchronized on [downstream] + * to make the decision to start the HASendService + * that relays to [addrNext] atomic. The + * HASendService uses [synchronized] for its public + * methods so we can coordinate this lock with its + * synchronization API. + */ + if (!sendService.isRunning()) { + /* + * Prepare send service for incremental + * transfers to the specified address. + */ + sendService.start(addrNext); + } + } // Send and await Future. - downstream.send(out).get(); + sendService.send(out).get(); } } } // while( rem > 0 ) - assert localBuffer.position() == message.getSize() : "localBuffer.pos=" - + localBuffer.position() - + ", message.size=" - + message.getSize(); + if (localBuffer.position() != message.getSize()) + throw new IOException("Receive length error: localBuffer.pos=" + + localBuffer.position() + ", message.size=" + + message.getSize()); // prepare for reading. localBuffer.flip(); @@ -818,11 +1032,12 @@ * benefit from NIO efficiencies. This method will own the buffer * until the returned {@link Future} is done. * - * @return A future which you can await. The future will become available - * when the data has been transferred into the buffer, at which - * point the position will be ZERO (0) and the limit will be the #of - * bytes received into the buffer. If the data transfer fails or is - * interrupted, the future will report the exception. + * @return A {@link Future} which you can await. The {@link Future} will + * become available when the data has been transferred into the + * buffer, at which point the position will be ZERO (0) and the + * limit will be the #of bytes received into the buffer. If the data + * transfer fails or is interrupted, the {@link Future} will report + * the exception. * * @throws InterruptedException */ @@ -877,7 +1092,7 @@ * Hook to notice receive events. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ + * * @param <M> */ public interface IHAReceiveCallback<M extends IHAWriteMessageBase> { @@ -901,14 +1116,14 @@ /** * Change the address to which the payloads are being relayed. This - * terminates the embedded {@link HASendService} and then - * {@link HASendService#start(InetSocketAddress)}s it with the new address - * (if any). + * terminates the embedded {@link HASendService}. The {@link HASendService} + * will be restarted with the new {@link InetSocketAddress} (if any) by the + * {@link ReadTask}. * <p> - * The {@link ReadTask} will throw out an exception when if there was a - * downstream target when the {@link IncSendTask} is interrupted. 
Since the - * {@link ReadTask} lacks the context to issue the appropriate RMI to the - * downstream task, the exception must be caught hand handled by the + * Note: The {@link ReadTask} will throw out an exception when if there was + * a downstream target when the {@link IncSendTask} is interrupted. Since + * the {@link ReadTask} lacks the context to issue the appropriate RMI to + * the downstream task, the exception must be caught and handled by the * {@link WriteCacheService}. It can simply rediscover the new downstream * service and then re-submit both the RMI and the {@link WriteCache} block. * @@ -921,6 +1136,10 @@ lock.lock(); try { + if (log.isInfoEnabled()) + log.info("addrNext(old)=" + this.addrNextRef.get() + + ", addrNext(new)=" + addrNext); + if (readFuture != null) { // Interrupt the current receive operation. @@ -928,15 +1147,25 @@ } - // Terminate existing HASendService (if any). - downstream.terminate(); + synchronized (sendService) { - // Save the new addr. - this.addrNext = addrNext; + if (sendService.isRunning()) { + + // Terminate HASendService (iff running). + sendService.terminate(); + + } + + } /* + * Save the new addr. + */ + this.addrNextRef.set(addrNext); + + /* * Note: Do not start the service here. It will be started by the - * next ReadTask, which will have the new value of addrNext. + * next ReadTask, which will have the then current value of addrNext. */ // if (addrNext != null) { // @@ -952,6 +1181,60 @@ } } - + + /** + * Method must be invoked when the upstream service is changed. The method + * is responsible for interrupting the current {@link RunTask} (if any) and + * closing the client socket connection that was used to receive data from + * the upstream service. A new connection will be accepted by the next + * {@link RunTask}. + */ + public void changeUpStream() { + + lock.lock(); + try { + + if (log.isInfoEnabled()) + log.info(""); + + if (readFuture != null) { + + // Interrupt the current receive operation. + readFuture.cancel(true/* mayInterruptIfRunning */); + + } + + /* + * Explicitly close the client socket channel. + */ + { + + final Client oldClient = clientRef.getAndSet(null); + + if (oldClient != null) { + + log.warn("Cleared Client reference."); + + try { + + oldClient.client.close(); + + } catch (IOException e) { + + log.warn(e, e); + + } + + } + + } + + } finally { + + lock.unlock(); + + } + + } + } - Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java 2012-10-30 11:17:27 UTC (rev 6692) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java 2012-10-30 18:03:37 UTC (rev 6693) @@ -69,6 +69,9 @@ * unsuccessful send gives us a strong indication of success or failure for the * data transfer which is independent of the RMI message and makes it trivial to * re-synchronize the {@link HASendService} since it is basically stateless. + * <p> + * Note: This class exposes its synchronization mechanism to + * {@link HAReceiveService}. * * @see HAReceiveService * @@ -82,7 +85,7 @@ /** * The Internet socket address of the receiving service. 
*/ - private final AtomicReference<InetSocketAddress> addr = new AtomicReference<InetSocketAddress>(); + private final AtomicReference<InetSocketAddress> addrNext = new AtomicReference<InetSocketAddress>(); /** * A single threaded executor on which {@link SendTask}s will be executed. @@ -95,9 +98,12 @@ */ final private AtomicReference<SocketChannel> socketChannel = new AtomicReference<SocketChannel>(); + /* + * Note: toString() must be thread-safe. + */ public String toString() { - return super.toString() + "{addr=" + addr + "}"; + return super.toString() + "{addrNext=" + addrNext + "}"; } @@ -109,9 +115,9 @@ * * @see #start(InetSocketAddress) */ - public InetSocketAddress getAddr() { + public InetSocketAddress getAddrNext() { - return addr.get(); + return addrNext.get(); } @@ -139,13 +145,35 @@ } /** + * Return <code>true</code> iff running at the moment this method is + * evaluated. + */ + boolean isRunning() { + + return executorRef.get() != null; + + } + +// /** +// * Return the address of the receiving service (may be <code>null</code>). +// */ +// InetSocketAddress getAddrNext() { +// +// return addr.get(); +// +// } + + /** * Starts a thread which will transfer data to a service listening at the * specified {@link InetSocketAddress}. A {@link SocketChannel} will be * opened to the specified the connection to the socket specified in the * constructor and start the thread pool on which the payloads will be send. + * <p> + * Note: This class exposes its synchronization mechanism to + * {@link HAReceiveService}. * - * @param addr - * The Internet socket address of the receiving service. + * @param addrNext + * The Internet socket address of the downstream service. * * @see #terminate() * @@ -154,44 +182,53 @@ * @throws IllegalStateException * if this service is already running. */ -// * @throws IOException -// * if the {@link SocketChannel} can not be opened. - synchronized public void start(final InetSocketAddress addr) -// throws IOException - { + synchronized public void start(final InetSocketAddress addrNext) { - if (addr == null) + if (log.isDebugEnabled()) + log.debug(toString() + " : starting."); + + if (addrNext == null) throw new IllegalArgumentException(); - - // already running? - if (executorRef.get() != null) - throw new IllegalStateException(); - - if (log.isInfoEnabled()) - log.info(toString()); - this.addr.set(addr); + if (executorRef.get() != null) { + // already running. + log.error("Already running."); + + throw new IllegalStateException("Already running."); + + } + + this.addrNext.set(addrNext); + /* * Note: leave null until send() so we can lazily connect to the * downstream service. */ - this.socketChannel.set(null);//openChannel(addr) + this.socketChannel.set(null);// openChannel(addr) this.executorRef.set(Executors.newSingleThreadExecutor()); - + + if (log.isInfoEnabled()) + log.info(toString() + " : running."); + } /** * Immediate shutdown. Any transfer in process will be interrupted. It is * safe to invoke this method whether or not the service is running. + * <p> + * Note: This class exposes its synchronization mechanism to + * {@link HAReceiveService}. */ synchronized public void terminate() { - if (log.isInfoEnabled()) - log.info(toString()); + if (log.isDebugEnabled()) + log.debug(toString() + " : stopping."); final ExecutorService tmp = executorRef.getAndSet(null); if (tmp == null) { // Not running. + if (log.isInfoEnabled()) + log.info("Service was not running."); return; } try { @@ -209,17 +246,19 @@ // shutdown executor. 
tmp.shutdownNow(); // clear address. - addr.set(null); + addrNext.set(null); if (log.isInfoEnabled()) - log.info(toString()); + log.info(toString() + " : stopped."); } } /** * Send the bytes {@link ByteBuffer#remaining()} in the buffer to the - * configured {@link InetSocketAddress}. This operation DOES NOT have a side - * effect on the position, limit or mark for the buffer. + * configured {@link InetSocketAddress}. * <p> + * Note: This operation DOES NOT have a side effect on the position, limit + * or mark for the buffer. + * <p> * Note: In order to use efficient NIO operations this MUST be a direct * {@link ByteBuffer}. * @@ -240,8 +279,8 @@ * could not be opened. */ public Future<Void> send(final ByteBuffer buffer) { - - if (buffer == null) + + if (buffer == null) throw new IllegalArgumentException(); if (buffer.remaining() == 0) @@ -251,7 +290,7 @@ final ExecutorService tmp = executorRef.get(); if (tmp == null) - throw new IllegalStateException(); + throw new IllegalStateException("Service is not running."); if (log.isTraceEnabled()) log.trace("Will send " + buffer.remaining() + " bytes"); @@ -271,7 +310,7 @@ /* * Open the SocketChannel. * - * @todo we may have to retry or play with the timeout for + * TODO we may have to retry or play with the timeout for * the socket connect request since the downstream node may * see its pipelineAdd() after the upstream node sees its * pipelineChange() event. For example, given a pipeline @@ -295,7 +334,7 @@ * has seen the pipelineChange() event. */ - socketChannel.set(sc = openChannel(addr.get())); + socketChannel.set(sc = openChannel(addrNext.get())); } catch (IOException e) { @@ -415,7 +454,8 @@ nwritten += nbytes; if (log.isTraceEnabled()) - log.trace("Sent " + nbytes + " bytes with " + nwritten + " of out " + remaining + " written so far"); + log.trace("Sent " + nbytes + " bytes with " + nwritten + + " of out " + remaining + " written so far"); } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2012-10-30 11:17:27 UTC (rev 6692) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2012-10-30 18:03:37 UTC (rev 6693) @@ -34,7 +34,6 @@ import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; @@ -382,13 +381,6 @@ } /** - * The #of times the leader in a highly available quorum will attempt to - * retransmit the current write cache block if there is an error when - * sending that write cache block to the downstream node. - */ - protected final int RETRY_COUNT = 3; - - /** * Allocates N buffers from the {@link DirectBufferPool}. * * @param nbuffers @@ -523,7 +515,7 @@ * Thompson</a> */ private class WriteTask implements Callable<Void> { - + /** * Note: If there is an error in this thread then it needs to be * propagated to the threads write()ing on the cache or awaiting flush() @@ -582,6 +574,8 @@ // ... [truncated message content] |
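The heart of the patch above is the retry discipline in RobustReplicateTask: attempt the send once, and if it fails for anything other than an interrupt, sleep briefly so the write pipeline can reconfigure, re-run the whole replicate step (which re-reads the current downstream address and re-asserts the quorum), and rethrow the original cause if every attempt fails. The standalone sketch below restates that idiom; only RETRY_COUNT and RETRY_SLEEP are taken from the diff, and the generic Callable is a placeholder for the real send-to-first-follower step, so this is an illustration of the pattern rather than the bigdata code.

import java.util.concurrent.Callable;

/**
 * Simplified sketch of the retry idiom used by RobustReplicateTask.
 */
class RetrySendSketch {

    static final int RETRY_COUNT = 3;  // total attempts, as in the patch
    static final int RETRY_SLEEP = 50; // ms between attempts, as in the patch

    static <T> T callRobustly(final Callable<T> op) throws Exception {
        try {
            // First attempt (retryCount == 0 in the patch).
            return op.call();
        } catch (InterruptedException ex) {
            // Interrupts are propagated immediately, never retried.
            throw ex;
        } catch (Exception firstCause) {
            // Retry loop: attempts 1 .. RETRY_COUNT-1.
            for (int tryCount = 1; tryCount < RETRY_COUNT; tryCount++) {
                // Sleep so the write pipeline has time to reconfigure.
                Thread.sleep(RETRY_SLEEP);
                try {
                    return op.call();
                } catch (Exception ex) {
                    // The real code logs each failure and keeps trying.
                }
            }
            // Give up: rethrow wrapping the original cause, as the patch does.
            throw new RuntimeException("Giving up. Could not send after "
                    + RETRY_COUNT + " attempts", firstCause);
        }
    }
}

A caller wraps whatever operation needs this behavior, for example callRobustly(() -> { doSend(); return null; }) where doSend() is a stand-in for the actual replication step, and sees either the successful result or the first failure once all attempts are exhausted.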
From: <tho...@us...> - 2012-10-31 12:42:15
Revision: 6695 http://bigdata.svn.sourceforge.net/bigdata/?rev=6695&view=rev Author: thompsonbry Date: 2012-10-31 12:42:03 +0000 (Wed, 31 Oct 2012) Log Message: ----------- Bug fix to StatusServlet. The zookeeper jar was being dragged in as a dependency. See [1]. The fix isolates the quorum related code in an HAStatusServletUtil class. Bug fix to SPARQL UPDATE handling in BigdataRDFContext. The code was passing [os] rather than [baos]. See [2]. Bug fix to HAJournalServer. The code had an assumption that HALogs were enabled. See [3]. Bug fix to HAJournalServer.shutdownNow(). The code assumed that keepAlive was non-null, but this is not true when the constructor in the base class is executing. The correct fix is to remove the initialization from the constructor. At the moment, I have modified shutdownNow() to test for keepAlive != null. This issue showed up when zk was not running and there was an attempt to start an HAJournalServer. [1] http://sourceforge.net/apps/trac/bigdata/ticket/612 (Bigdata scale-up depends on zookeper) [2] http://sourceforge.net/apps/trac/bigdata/ticket/611 (The code that processes SPARQL Update has a typo) [3] https://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-30 19:15:43 UTC (rev 6694) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-31 12:42:03 UTC (rev 6695) @@ -91,7 +91,7 @@ /** * Logger for HA events. */ - protected static final Logger haLog = Logger.getLogger("com.bigdata.haLog"); + private static final Logger haLog = Logger.getLogger("com.bigdata.haLog"); /** * Configuration options for the {@link HAJournalServer}. @@ -583,8 +583,14 @@ @Override synchronized public void shutdownNow(final boolean destroy) { - if (keepAlive.compareAndSet(true/* expect */, false/* update */)) { + /* + * Note: keepAlive will be null if this code is invoked from within the + * constructor on the base class (AbstractServer). + */ + if (keepAlive != null + && keepAlive.compareAndSet(true/* expect */, false/* update */)) { + synchronized (keepAlive) { keepAlive.notifyAll(); @@ -1451,24 +1457,28 @@ final long commitCounter = journal.getRootBlockView() .getCommitCounter(); - final HALogWriter logWriter = journal.getHALogWriter(); + if (HA_LOG_ENABLED) { - if (msg.getCommitCounter() == logWriter.getCommitCounter() - && msg.getSequence() == (logWriter.getSequence() - 1)) { + final HALogWriter logWriter = journal.getHALogWriter(); - /* - * Duplicate message. This can occur due retrySend(). 
- * retrySend() is used to make the pipeline robust if a - * service (other than the leader) drops out and we need to - * change the connections between the services in the write - * pipeline in order to get the message through. - */ - - if (log.isInfoEnabled()) - log.info("Ignoring message (dup): " + msg); + if (msg.getCommitCounter() == logWriter.getCommitCounter() + && msg.getSequence() == (logWriter.getSequence() - 1)) { - return; + /* + * Duplicate message. This can occur due retrySend(). + * retrySend() is used to make the pipeline robust if a + * service (other than the leader) drops out and we need + * to change the connections between the services in the + * write pipeline in order to get the message through. + */ + if (log.isInfoEnabled()) + log.info("Ignoring message (dup): " + msg); + + return; + + } + } if (resyncFuture != null && !resyncFuture.isDone()) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2012-10-30 19:15:43 UTC (rev 6694) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2012-10-31 12:42:03 UTC (rev 6695) @@ -1239,7 +1239,7 @@ // buffer the response here. baos = new ByteArrayOutputStream(); - listener = new SparqlUpdateResponseWriter(resp, os, charset, + listener = new SparqlUpdateResponseWriter(resp, baos, charset, false/* reportLoadProgress */, false/* flushEachEvent */); } Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2012-10-31 12:42:03 UTC (rev 6695) @@ -0,0 +1,305 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sail.webapp; + +import java.io.File; +import java.io.FilenameFilter; +import java.io.IOException; +import java.io.PrintWriter; +import java.util.UUID; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.zookeeper.KeeperException; + +import com.bigdata.ha.HAGlue; +import com.bigdata.ha.QuorumService; +import com.bigdata.ha.halog.HALogWriter; +import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IIndexManager; +import com.bigdata.quorum.zk.ZKQuorumImpl; +import com.bigdata.zookeeper.DumpZookeeper; + +/** + * Class supports the {@link StatusServlet} and isolates code that has a + * dependency on zookeeper so we do not drag in zookeeper for embedded + * {@link NanoSparqlServer} deployments. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/612 (Bigdata scale-up + * depends on zookeper) + */ +public class HAStatusServletUtil { + + final private IIndexManager indexManager; + + public HAStatusServletUtil(final IIndexManager indexManager) { + + if (indexManager == null) + throw new IllegalArgumentException(); + + this.indexManager = indexManager; + + } + + /** + * Show the interesting things about the quorum. + * <ol> + * <li>QuorumState</li> + * <li>Who is the leader, who is a follower.</li> + * <li>What is the SPARQL end point for each leader and follower.</li> + * <li>Dump of the zookeeper state related to the quorum.</li> + * <li>listServices (into pre element).</li> + * </ol> + * + * @throws IOException + */ + public void showQuorum(final HttpServletRequest req, + final HttpServletResponse resp, final XMLBuilder.Node current) + throws IOException { + + if (!(indexManager instanceof AbstractJournal)) + return; + + final AbstractJournal journal = (AbstractJournal) indexManager; + + final ZKQuorumImpl<HAGlue, QuorumService<HAGlue>> quorum = (ZKQuorumImpl<HAGlue, QuorumService<HAGlue>>) journal + .getQuorum(); + + // The current token. + final long quorumToken = quorum.token(); + + // The last valid token. + final long lastValidToken = quorum.lastValidToken(); + + final int njoined = quorum.getJoined().length; + + final QuorumService<HAGlue> quorumService = quorum.getClient(); + + current.node("h1", "High Availability"); + + // The quorum state. + { + + final XMLBuilder.Node p = current.node("p"); + + p.text("The quorum is " + (quorum.isQuorumMet() ? "" : "not") + + " met.").node("br").close(); + + p.text("" + njoined + " out of " + quorum.replicationFactor() + + " services are joined.").node("br").close(); + + p.text("quorumToken=" + quorumToken + ", lastValidToken=" + + lastValidToken).node("br").close(); + + p.text("logicalServiceId=" + quorumService.getLogicalServiceId()).node("br") + .close(); + + /* + * Report on the Service. + */ + { + final File serviceDir = quorumService.getServiceDir(); + p.text("ServiceDir: path=" + serviceDir).node("br") + .close(); + } + + /* + * Report on the Journal. + */ + { + final File file = journal.getFile(); + if (file != null) { + p.text("DataDir: path=" + file.getParent()) + .node("br").close(); + } + } + + /* + * Report #of files and bytes in the HALog directory. 
+ */ + { + final File haLogDir = quorumService.getHALogDir(); + final File[] a = haLogDir + .listFiles(new FilenameFilter() { + @Override + public boolean accept(File dir, String name) { + return name + .endsWith(HALogWriter.HA_LOG_EXT); + } + }); + int nfiles = 0; + long nbytes = 0L; + for (File file : a) { + nbytes += file.length(); + nfiles++; + } + p.text("HALogDir: nfiles=" + nfiles + ", nbytes=" + + nbytes + ", path=" + haLogDir).node("br") + .close(); + } + p.close(); + + current.node("pre", quorum.toString()); + + } + + /* + * Display the NSS port, host, and leader/follower/not-joined + * status for each service in the quorum. + */ + current.node("h2", "Quorum Services"); + { + final XMLBuilder.Node p = current.node("p"); + + final UUID[] joined = quorum.getJoined(); + + final UUID[] pipeline = quorum.getPipeline(); + + for (UUID serviceId : quorum.getMembers()) { + + final HAGlue remoteService; + try { + + remoteService = quorumService.getService(serviceId); + + } catch (RuntimeException ex) { + + /* + * Ignore. Might not be an HAGlue instance. + */ + + continue; + + } + + /* + * Note: This is not actually reporting the interface + * that the port is exposed to. + */ + + final String hostname = remoteService.getHostname(); + + final int nssPort = remoteService.getNSSPort(); + + final boolean isLeader = serviceId.equals(quorum + .getLeaderId()); + + final boolean isFollower = indexOf(serviceId, joined) > 0; + + final int pipelineIndex = indexOf(serviceId, pipeline); + + p.text(hostname + + " : nssPort=" + + nssPort + + " : " + + (isLeader ? "leader" + : (isFollower ? "follower" + : " is not joined")) + + ", pipelineOrder=" + + (pipelineIndex == -1 ? " is not in pipeline" + : pipelineIndex)).node("br").close(); + + } + + p.close(); + + } + + // DumpZookeeper + { + + current.node("h2", "Zookeeper"); + + // final XMLBuilder.Node section = current.node("pre"); + // flush writer before writing on PrintStream. + current.getBuilder().getWriter().flush(); + + // dump onto the response. + final PrintWriter out = new PrintWriter( + resp.getOutputStream(), true/* autoFlush */); + + out.print("<pre>\n"); + + try { + + final DumpZookeeper dump = new DumpZookeeper( + quorum.getZookeeper()); + + dump.dump(out, true/* showDatatrue */, + quorumService.getLogicalServiceId()/* zpath */, + 0/* depth */); + + } catch (InterruptedException e) { + + e.printStackTrace(out); + + } catch (KeeperException e) { + + e.printStackTrace(out); + + } + + // flush PrintWriter before resuming writes on Writer. + out.flush(); + + // close section. + out.print("\n</pre>"); + + } + + } + + /** + * Return the index of the given {@link UUID} in the array of {@link UUID}s. + * + * @param x + * The {@link UUID} + * @param a + * The array of {@link UUID}s. + * + * @return The index of the {@link UUID} in the array -or- <code>-1</code> + * if the {@link UUID} does not appear in the array. 
+ */ + static private int indexOf(final UUID x, final UUID[] a) { + + if (x == null) + throw new IllegalArgumentException(); + + for (int i = 0; i < a.length; i++) { + + if (x.equals(a[i])) { + + return i; + + } + + } + + return -1; + + } + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2012-10-30 19:15:43 UTC (rev 6694) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2012-10-31 12:42:03 UTC (rev 6695) @@ -22,8 +22,6 @@ */ package com.bigdata.rdf.sail.webapp; -import java.io.File; -import java.io.FilenameFilter; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; @@ -44,7 +42,6 @@ import javax.servlet.http.HttpServletResponse; import org.apache.log4j.Logger; -import org.apache.zookeeper.KeeperException; import com.bigdata.bop.BOpUtility; import com.bigdata.bop.PipelineOp; @@ -55,21 +52,16 @@ import com.bigdata.bop.engine.QueryLog; import com.bigdata.bop.fed.QueryEngineFactory; import com.bigdata.counters.CounterSet; -import com.bigdata.ha.HAGlue; -import com.bigdata.ha.QuorumService; -import com.bigdata.ha.halog.HALogWriter; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.DumpJournal; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.Journal; -import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rdf.sail.sparql.ast.SimpleNode; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.QueryRoot; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.util.InnerCause; -import com.bigdata.zookeeper.DumpZookeeper; /** * A status page for the service. @@ -333,7 +325,7 @@ // final XMLBuilder.Node section = current.node("pre"); // flush writer before writing on PrintStream. - w.flush(); + doc.getWriter().flush(); // dump onto the response. final PrintWriter out = new PrintWriter(resp.getOutputStream(), @@ -371,212 +363,9 @@ && ((AbstractJournal) getIndexManager()) .isHighlyAvailable()) { - /* - * Show the interesting things about the quorum. - * - * 1. QuorumState - * - * 2. Who is the leader, who is a follower. - * - * 3. What is the SPARQL end point for each leader and follower - * (where will this be published? HAGlue? HAJournalServer admin - * interface?) - * - * 4. dumpZoo (into pre element). - * - * 5. listServices (into pre element). - */ + new HAStatusServletUtil(getIndexManager()) + .showQuorum(req, resp, current); - final AbstractJournal journal = (AbstractJournal) getIndexManager(); - - final ZKQuorumImpl<HAGlue, QuorumService<HAGlue>> quorum = (ZKQuorumImpl<HAGlue, QuorumService<HAGlue>>) journal - .getQuorum(); - - // The current token. - final long quorumToken = quorum.token(); - - // The last valid token. - final long lastValidToken = quorum.lastValidToken(); - - final int njoined = quorum.getJoined().length; - - final QuorumService<HAGlue> quorumService = quorum.getClient(); - - current.node("h1", "High Availability"); - - // The quorum state. - { - - final XMLBuilder.Node p = current.node("p"); - - p.text("The quorum is " - + (quorum.isQuorumMet() ? 
"" : "not") + " met.") - .node("br").close(); - - p.text("" + njoined + " out of " - + quorum.replicationFactor() - + " services are joined.").node("br").close(); - - p.text("quorumToken=" + quorumToken + ", lastValidToken=" - + lastValidToken).node("br").close(); - - p.text("logicalServiceId=" - + quorumService.getLogicalServiceId()).node("br") - .close(); - - /* - * Report on the Service. - */ - { - final File serviceDir = quorumService.getServiceDir(); - p.text("ServiceDir: path=" + serviceDir).node("br") - .close(); - } - - /* - * Report on the Journal. - */ - { - final File file = journal.getFile(); - if (file != null) { - p.text("DataDir: path=" + file.getParent()) - .node("br").close(); - } - } - - /* - * Report #of files and bytes in the HALog directory. - */ - { - final File haLogDir = quorumService.getHALogDir(); - final File[] a = haLogDir - .listFiles(new FilenameFilter() { - @Override - public boolean accept(File dir, String name) { - return name - .endsWith(HALogWriter.HA_LOG_EXT); - } - }); - int nfiles = 0; - long nbytes = 0L; - for (File file : a) { - nbytes += file.length(); - nfiles++; - } - p.text("HALogDir: nfiles=" + nfiles + ", nbytes=" - + nbytes + ", path=" + haLogDir).node("br") - .close(); - } - p.close(); - - current.node("pre", quorum.toString()); - - } - - /* - * Display the NSS port, host, and leader/follower/not-joined - * status for each service in the quorum. - */ - current.node("h2", "Quorum Services"); - { - final XMLBuilder.Node p = current.node("p"); - - final UUID[] joined = quorum.getJoined(); - - final UUID[] pipeline = quorum.getPipeline(); - - for (UUID serviceId : quorum.getMembers()) { - - final HAGlue remoteService; - try { - - remoteService = quorumService.getService(serviceId); - - } catch (RuntimeException ex) { - - /* - * Ignore. Might not be an HAGlue instance. - */ - - continue; - - } - - /* - * Note: This is not actually reporting the interface - * that the port is exposed to. - */ - - final String hostname = remoteService.getHostname(); - - final int nssPort = remoteService.getNSSPort(); - - final boolean isLeader = serviceId.equals(quorum - .getLeaderId()); - - final boolean isFollower = indexOf(serviceId, joined) > 0; - - final int pipelineIndex = indexOf(serviceId, pipeline); - - p.text(hostname - + " : nssPort=" - + nssPort - + " : " - + (isLeader ? "leader" - : (isFollower ? "follower" - : " is not joined")) - + ", pipelineOrder=" - + (pipelineIndex == -1 ? " is not in pipeline" - : pipelineIndex)).node("br").close(); - - } - - p.close(); - - } - - // DumpZookeeper - { - - current.node("h2", "Zookeeper"); - - // final XMLBuilder.Node section = current.node("pre"); - // flush writer before writing on PrintStream. - w.flush(); - - // dump onto the response. - final PrintWriter out = new PrintWriter( - resp.getOutputStream(), true/* autoFlush */); - - out.print("<pre>\n"); - - try { - - final DumpZookeeper dump = new DumpZookeeper( - quorum.getZookeeper()); - - dump.dump(out, true/* showDatatrue */, - quorumService.getLogicalServiceId()/* zpath */, - 0/* depth */); - - } catch (InterruptedException e) { - - e.printStackTrace(out); - - } catch (KeeperException e) { - - e.printStackTrace(out); - - } - - // flush PrintWriter before resuming writes on Writer. - out.flush(); - - // close section. - out.print("\n</pre>"); - - } - } current.node("br", "Accepted query count=" @@ -1038,34 +827,4 @@ }); } - /** - * Return the index of the given {@link UUID} in the array of {@link UUID}s. 
- * - * @param x - * The {@link UUID} - * @param a - * The array of {@link UUID}s. - * - * @return The index of the {@link UUID} in the array -or- <code>-1</code> - * if the {@link UUID} does not appear in the array. - */ - static private int indexOf(final UUID x, final UUID[] a) { - - if (x == null) - throw new IllegalArgumentException(); - - for (int i = 0; i < a.length; i++) { - - if (x.equals(a[i])) { - - return i; - - } - - } - - return -1; - - } - } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java 2012-10-30 19:15:43 UTC (rev 6694) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java 2012-10-31 12:42:03 UTC (rev 6695) @@ -96,6 +96,15 @@ // // } + /** + * Return the backing {@link Writer}. + */ + public Writer getWriter() { + + return m_writer; + + } + public XMLBuilder(final Writer w) throws IOException { this(true/* xml */, null/* encoding */, w/* writer */); @@ -186,6 +195,15 @@ } /** + * Return the {@link XMLBuilder} to which this {@link Node} belongs. + */ + public XMLBuilder getBuilder() { + + return XMLBuilder.this; + + } + + /** * Add an attribute value to an open element head. * * @param attr This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
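The StatusServlet fix above is a classpath-isolation pattern worth spelling out: everything that imports org.apache.zookeeper now lives in HAStatusServletUtil, and StatusServlet only instantiates that class after checking that the journal is highly available, so an embedded NanoSparqlServer without the zookeeper jar never triggers loading of those classes. The sketch below illustrates the idea with hypothetical names (OptionalFeatureView stands in for HAStatusServletUtil, featureEnabled for the isHighlyAvailable() test); it assumes the JVM's usual lazy class loading and is not the servlet code itself.

/**
 * Sketch of isolating an optional dependency behind a helper class that is
 * only referenced when the feature is enabled.
 */
class StatusPage {

    /**
     * Only this class would import the optional library (zookeeper in the
     * patch). It is not loaded until it is first instantiated in render().
     */
    static class OptionalFeatureView {
        void render(final StringBuilder out) {
            // The real helper walks the quorum and dumps zookeeper state here.
            out.append("<h1>High Availability</h1>\n");
        }
    }

    private final boolean featureEnabled;

    StatusPage(final boolean featureEnabled) {
        this.featureEnabled = featureEnabled;
    }

    String render() {
        final StringBuilder out = new StringBuilder("<h1>Status</h1>\n");
        if (featureEnabled) {
            // The optional class (and, transitively, the optional jar) is
            // only touched on this branch.
            new OptionalFeatureView().render(out);
        }
        return out.toString();
    }

    public static void main(final String[] args) {
        System.out.println(new StatusPage(false).render()); // no optional code
        System.out.println(new StatusPage(true).render());  // HA branch
    }
}

The same reasoning explains why the patch leaves the isHighlyAvailable() test in StatusServlet itself and moves only the zookeeper-touching body of the quorum report into the new class.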
From: <tho...@us...> - 2012-10-31 17:08:55
Revision: 6696 http://bigdata.svn.sourceforge.net/bigdata/?rev=6696&view=rev Author: thompsonbry Date: 2012-10-31 17:08:43 +0000 (Wed, 31 Oct 2012) Log Message: ----------- Stubbed in support for disaster recovery based on replication of the leader's backing file plus replay of the HALogs. This involved a change to some of the HA interfaces (IHALogRequest is now a subinterface of IHASyncRequest). New methods were added to request the backing store from the leader, to do the appropriate raw reads on the backing store, and to replicate the data onto the pipeline. The HAJournalServer does not yet process the received write cache blocks for a rebuild operation. We need to provide an appropriate hook for the RWStore since it needs to handle these more like WORM writes than scattered writes. Also, the HALog replay is not yet in place. Methods were also stubbed in to allow the leader to request the digest of the stores from the followers. These methods have not been implemented yet. @see https://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogRequest.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogRequest.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/FileMetadata.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/quorum/zk/MockQuorumMember.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HARebuildRequest.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHARebuildRequest.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHASyncRequest.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java 2012-10-31 12:42:03 UTC (rev 6695) +++ 
branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -79,14 +79,16 @@ */ /** - * Return a root block for the persistence store. The initial root blocks - * are identical, so this may be used to create a new journal in a quorum by - * replicating the root blocks of the quorum leader. + * Return the then current root block for the persistence store. + * <p> + * Note: The initial root blocks are identical, so this may be used to + * create a new journal in a quorum by replicating the root blocks of the + * quorum leader. * * @param msg - * The message requesting the root block. + * The message requesting the then current root block. * - * @return The root block. + * @return The then current root block. */ IHARootBlockResponse getRootBlock(IHARootBlockRequest msg) throws IOException; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -37,8 +37,10 @@ import com.bigdata.ha.msg.IHALogRootBlocksResponse; import com.bigdata.ha.msg.IHAReadRequest; import com.bigdata.ha.msg.IHAReadResponse; +import com.bigdata.ha.msg.IHARebuildRequest; import com.bigdata.ha.msg.IHARootBlockRequest; import com.bigdata.ha.msg.IHARootBlockResponse; +import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.journal.ValidationError; @@ -100,8 +102,8 @@ return delegate.abort2Phase(abortMessage); } - public Future<Void> receiveAndReplicate(IHALogRequest req, - IHAWriteMessage msg) throws IOException { + public Future<Void> receiveAndReplicate(final IHASyncRequest req, + final IHAWriteMessage msg) throws IOException { return delegate.receiveAndReplicate(req, msg); } @@ -182,4 +184,9 @@ return delegate.getNSSPort(); } + @Override + public Future<Void> sendHAStore(IHARebuildRequest msg) throws IOException { + return delegate.sendHAStore(msg); + } + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -36,6 +36,8 @@ import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHALogRootBlocksRequest; import com.bigdata.ha.msg.IHALogRootBlocksResponse; +import com.bigdata.ha.msg.IHARebuildRequest; +import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.writecache.WriteCache; import com.bigdata.journal.WriteExecutorService; @@ -129,7 +131,7 @@ * @return The {@link Future} which will become available once the buffer * transfer is complete. */ - Future<Void> receiveAndReplicate(IHALogRequest req, IHAWriteMessage msg) + Future<Void> receiveAndReplicate(IHASyncRequest req, IHAWriteMessage msg) throws IOException; /** @@ -176,4 +178,50 @@ */ Future<Void> sendHALogForWriteSet(IHALogRequest msg) throws IOException; + /** + * Send the raw blocks for the requested backing store across the write + * pipeline. 
+ * <p> + * Note: This method supports disaster recovery of a service from a met + * quorum. This procedure can only be used when a met quorum exists. + * <p> + * Note: DO NOT use a {@link ThickFuture} for the returned {@link Future}. + * That will defeat the ability of the requester to cancel the + * {@link Future}. + * + * @param req + * A request to replicate a backing store. + * + * @return A {@link Future} that may be used to cancel the remote process + * sending the data through the write pipeline. + */ + Future<Void> sendHAStore(IHARebuildRequest msg) throws IOException; + + /** + * TODO Method to compute a digest for the committed allocations on a + * backing store as of the commit point on which the specified transaction + * is reading. This may be used to verify that the backing stores are + * logically consistent even when they may have some discarded writes that + * are not present on all stores (from aborted write sets). + * <p> + * The caller can get the root blocks for the commit counter associated with + * the txId (if we can the readsOnCommitTime). + * <p> + * The RWStore needs to snapshot the allocators while holding the allocation + * lock and that snapshot MUST be for the same commit point. Therefore, this + * operation needs to be submitted by the leader in code that can guarantee + * that the leader does not go through a commit point. The snapshots should + * be stored at the nodes, not shipped to the leader. The leader therefore + * needs to know when each snapshot is ready so it can exit the critical + * code and permit additional writes (releasing the allocLock or the + * semaphore on the journal). + * + * <pre> + * IHAStoreChecksumRequest {storeUUID, txid (of readLock)} + * + * IHAStoreCheckSumResponse {MD5Digest} + * </pre> + */ +// Future<IHASnapshotDigestResponse> computeSnapshotDigest(IHASnapshotDigestRequest req) throws IOException; + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -32,7 +32,7 @@ import java.util.concurrent.Future; import com.bigdata.ha.halog.HALogWriter; -import com.bigdata.ha.msg.IHALogRequest; +import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.writecache.WriteCache; import com.bigdata.journal.IRootBlockView; @@ -62,9 +62,9 @@ * to the related {@link QuorumMember} events. * * @param req - * A request for an HALog (optional). This is only non-null when - * historical {@link WriteCache} blocks are being replayed down - * the write pipeline in order to synchronize a service. + * A synchronization request (optional). This is only non-null + * when historical {@link WriteCache} blocks are being replayed + * down the write pipeline in order to synchronize a service. * @param msg * The RMI metadata about the payload. * @param b @@ -72,7 +72,7 @@ * transmitted (note that the #of bytes remaining in the buffer * MUST agree with {@link IHAWriteMessage#getSize()}). */ - Future<Void> replicate(IHALogRequest req, IHAWriteMessage msg, ByteBuffer b) + Future<Void> replicate(IHASyncRequest req, IHAWriteMessage msg, ByteBuffer b) throws IOException; /** @@ -81,13 +81,13 @@ * master, including the last node in the failover chain. 
* * @param req - * A request for an HALog (optional). This is only non-null when - * historical {@link WriteCache} blocks are being replayed down - * the write pipeline in order to synchronize a service. + * A synchronization request (optional). This is only non-null + * when historical {@link WriteCache} blocks are being replayed + * down the write pipeline in order to synchronize a service. * @param msg * The RMI metadata about the payload. */ - Future<Void> receiveAndReplicate(IHALogRequest req, IHAWriteMessage msg) + Future<Void> receiveAndReplicate(IHASyncRequest req, IHAWriteMessage msg) throws IOException; /* Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -46,6 +46,7 @@ import com.bigdata.ha.msg.HAWriteMessageBase; import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHAMessage; +import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.ha.pipeline.HAReceiveService; import com.bigdata.ha.pipeline.HAReceiveService.IHAReceiveCallback; @@ -649,10 +650,10 @@ private static final long serialVersionUID = 1L; - final IHALogRequest req; + final IHASyncRequest req; final IHAWriteMessage msg; - public HAMessageWrapper(final IHALogRequest req, + public HAMessageWrapper(final IHASyncRequest req, final IHAWriteMessage msg) { // Use size and checksum from real IHAWriteMessage. @@ -720,7 +721,7 @@ * This is the leader, so send() the buffer. */ @Override - public Future<Void> replicate(final IHALogRequest req, + public Future<Void> replicate(final IHASyncRequest req, final IHAWriteMessage msg, final ByteBuffer b) throws IOException { final RunnableFuture<Void> ft; @@ -751,10 +752,10 @@ private class RobustReplicateTask implements Callable<Void> { /** - * An historical message is indicated when the {@link IHALogRequest} is + * An historical message is indicated when the {@link IHASyncRequest} is * non-<code>null</code>. */ - private final IHALogRequest req; + private final IHASyncRequest req; /** * The {@link IHAWriteMessage}. @@ -786,8 +787,8 @@ */ static protected final int RETRY_SLEEP = 50; - public RobustReplicateTask(final IHALogRequest req, final IHAWriteMessage msg, - final ByteBuffer b) { + public RobustReplicateTask(final IHASyncRequest req, + final IHAWriteMessage msg, final ByteBuffer b) { // Note: [req] MAY be null. @@ -930,7 +931,7 @@ } - } + } // call() /** * Robust retransmit of the current cache block. 
This method is designed to @@ -1010,14 +1011,14 @@ static private class SendBufferTask<S extends HAPipelineGlue> implements Callable<Void> { - private final IHALogRequest req; + private final IHASyncRequest req; private final IHAWriteMessage msg; private final ByteBuffer b; private final PipelineState<S> downstream; private final HASendService sendService; private final Lock sendLock; - public SendBufferTask(final IHALogRequest req, + public SendBufferTask(final IHASyncRequest req, final IHAWriteMessage msg, final ByteBuffer b, final PipelineState<S> downstream, final HASendService sendService, final Lock sendLock) { @@ -1108,7 +1109,7 @@ private final Lock sendLock = new ReentrantLock(); @Override - public Future<Void> receiveAndReplicate(final IHALogRequest req, + public Future<Void> receiveAndReplicate(final IHASyncRequest req, final IHAWriteMessage msg) throws IOException { final RunnableFuture<Void> ft; @@ -1198,13 +1199,13 @@ private static class ReceiveAndReplicateTask<S extends HAPipelineGlue> implements Callable<Void> { - private final IHALogRequest req; + private final IHASyncRequest req; private final IHAWriteMessage msg; private final ByteBuffer b; private final PipelineState<S> downstream; private final HAReceiveService<HAMessageWrapper> receiveService; - public ReceiveAndReplicateTask(final IHALogRequest req, + public ReceiveAndReplicateTask(final IHASyncRequest req, final IHAWriteMessage msg, final ByteBuffer b, final PipelineState<S> downstream, final HAReceiveService<HAMessageWrapper> receiveService) { @@ -1277,15 +1278,19 @@ * Core implementation handles the message and payload when received on a * service. * + * @param req + * The synchronization request (optional). When non- + * <code>null</code> the msg and payload are historical data. + * When <code>null</code> they are live data. * @param msg * Metadata about a buffer containing data replicated to this * node. * @param data * The buffer containing the data. 
- * + * * @throws Exception */ - abstract protected void handleReplicatedWrite(final IHALogRequest req, + abstract protected void handleReplicatedWrite(final IHASyncRequest req, final IHAWriteMessage msg, final ByteBuffer data) throws Exception; /** Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -39,7 +39,7 @@ import org.apache.log4j.Logger; -import com.bigdata.ha.msg.IHALogRequest; +import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.IResourceManager; @@ -103,7 +103,7 @@ addListener(this.pipelineImpl = new QuorumPipelineImpl<S>(this) { @Override - protected void handleReplicatedWrite(final IHALogRequest req, + protected void handleReplicatedWrite(final IHASyncRequest req, final IHAWriteMessage msg, final ByteBuffer data) throws Exception { @@ -210,7 +210,7 @@ // } @Override - public Future<Void> receiveAndReplicate(final IHALogRequest req, + public Future<Void> receiveAndReplicate(final IHASyncRequest req, final IHAWriteMessage msg) throws IOException { return pipelineImpl.receiveAndReplicate(req, msg); @@ -218,7 +218,7 @@ } @Override - public Future<Void> replicate(final IHALogRequest req, + public Future<Void> replicate(final IHASyncRequest req, final IHAWriteMessage msg, final ByteBuffer b) throws IOException { return pipelineImpl.replicate(req, msg, b); @@ -229,6 +229,10 @@ * Core implementation handles the message and payload when received on a * service. * + * @param req + * The synchronization request (optional). When non- + * <code>null</code> the message and payload are historical data. + * When <code>null</code> they are live data. * @param msg * Metadata about a buffer containing data replicated to this * node. @@ -237,9 +241,10 @@ * * @throws Exception * - * @see QuorumPipelineImpl#handleReplicatedWrite(IHAWriteMessage, ByteBuffer) + * @see QuorumPipelineImpl#handleReplicatedWrite(IHAWriteMessage, + * ByteBuffer) */ - abstract protected void handleReplicatedWrite(IHALogRequest req, + abstract protected void handleReplicatedWrite(IHASyncRequest req, IHAWriteMessage msg, ByteBuffer data) throws Exception; /** Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogRequest.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogRequest.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -34,6 +34,7 @@ private final UUID serviceId; private final long commitCounter; + private final boolean incremental; /** * @param serviceId @@ -42,10 +43,12 @@ * The commit counter used to identify the desired commit point * (the commit counter of the closing root block). 
*/ - public HALogRequest(final UUID serviceId, final long commitCounter) { + public HALogRequest(final UUID serviceId, final long commitCounter, + final boolean incremental) { this.serviceId = serviceId; this.commitCounter = commitCounter; + this.incremental = incremental; } @@ -66,8 +69,15 @@ public String toString() { return getClass() + "{serviceId=" + getServiceId() + ", commitCounter=" - + getCommitCounter() + "}"; + + getCommitCounter() + ", incremental=" + isIncremental() + "}"; } + + @Override + public boolean isIncremental() { + + return incremental; + + } } Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HARebuildRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HARebuildRequest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HARebuildRequest.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -0,0 +1,68 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +import java.util.UUID; + +public class HARebuildRequest implements IHARebuildRequest { + + /** + * + */ + private static final long serialVersionUID = 1L; + + private final UUID serviceId; + + /** + * @param serviceId + * The {@link UUID} of the service that made the request. + */ + public HARebuildRequest(final UUID serviceId) { + + this.serviceId = serviceId; + + } + + @Override + public UUID getServiceId() { + + return serviceId; + + } + + public String toString() { + + return getClass() + "{serviceId=" + getServiceId() + ", incremental=" + + isIncremental() + "}"; + + } + + @Override + final public boolean isIncremental() { + + return false; + + } + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogRequest.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogRequest.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -23,12 +23,10 @@ */ package com.bigdata.ha.msg; -import java.util.UUID; - /** * Message requesting the root blocks and other metadata for an HA Log file. */ -public interface IHALogRequest extends IHAMessage { +public interface IHALogRequest extends IHASyncRequest { /** * The commit counter is used to identify the desired commit point (the @@ -36,8 +34,4 @@ */ long getCommitCounter(); - /** - * The UUID of the service that issued this request. 
- */ - UUID getServiceId(); } Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHARebuildRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHARebuildRequest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHARebuildRequest.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -0,0 +1,31 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +/** + * Message requesting raw blocks from the backing file for a store. + */ +public interface IHARebuildRequest extends IHASyncRequest { + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHASyncRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHASyncRequest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHASyncRequest.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -0,0 +1,48 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +import java.util.UUID; + +/** + * An abstract message for either re-synchronizing (incremental catch up) or + * re-building (ground up rebuild). + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public interface IHASyncRequest extends IHAMessage { + + /** + * When <code>true</code> the request is part of an incremental + * re-synchronization. When <code>false</code> the request is part of + * a total re-build. + */ + boolean isIncremental(); + + /** + * The UUID of the service that issued this request. 
+ */ + UUID getServiceId(); + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -98,8 +98,10 @@ import com.bigdata.ha.msg.IHALogRootBlocksResponse; import com.bigdata.ha.msg.IHAReadRequest; import com.bigdata.ha.msg.IHAReadResponse; +import com.bigdata.ha.msg.IHARebuildRequest; import com.bigdata.ha.msg.IHARootBlockRequest; import com.bigdata.ha.msg.IHARootBlockResponse; +import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.htree.HTree; import com.bigdata.io.DirectBufferPool; @@ -1297,14 +1299,14 @@ } /** - * The delegate that implements the {@link BufferMode}. + * Return the delegate that implements the {@link BufferMode}. * <p> * Note: this method MUST NOT check to see whether the journal is open since * we need to use it if we want to invoke * {@link IBufferStrategy#deleteResources()} and we can only invoke that * method once the journal is closed. */ - final public IBufferStrategy getBufferStrategy() { + public IBufferStrategy getBufferStrategy() { return _bufferStrategy; @@ -5540,7 +5542,7 @@ * Delegated to HAQuorumService. */ @Override - public Future<Void> receiveAndReplicate(final IHALogRequest req, + public Future<Void> receiveAndReplicate(final IHASyncRequest req, final IHAWriteMessage msg) throws IOException { if (haLog.isDebugEnabled()) @@ -5573,7 +5575,16 @@ throw new UnsupportedOperationException(); } + /* + * This is implemented by HAJournal. + */ @Override + public Future<Void> sendHAStore(IHARebuildRequest msg) + throws IOException { + throw new UnsupportedOperationException(); + } + + @Override public IHARootBlockResponse getRootBlock( final IHARootBlockRequest msg) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/FileMetadata.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/FileMetadata.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/FileMetadata.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -210,7 +210,7 @@ * blocks. This is used as an offset when computing the index of a record in * the journal. 
*/ - static final int headerSize0 = SIZE_MAGIC + SIZE_VERSION + (SIZEOF_ROOT_BLOCK * 2); + public static final int headerSize0 = SIZE_MAGIC + SIZE_VERSION + (SIZEOF_ROOT_BLOCK * 2); /** * Depending on the mode, this will be either a direct buffer, a mapped Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -29,9 +29,11 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.security.MessageDigest; import java.util.concurrent.Future; import com.bigdata.ha.msg.IHALogRequest; +import com.bigdata.ha.msg.IHARebuildRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.IBufferAccess; import com.bigdata.io.writecache.WriteCache; @@ -59,6 +61,8 @@ * Send an {@link IHAWriteMessage} and the associated raw buffer through the * write pipeline. * + * @param req + * The {@link IHALogRequest} for some HALog file. * @param msg * The {@link IHAWriteMessage}. * @param b @@ -74,6 +78,42 @@ IBufferAccess b) throws IOException, InterruptedException; /** + * Send an {@link IHAWriteMessage} and the associated raw buffer through the + * write pipeline. + * + * @param req + * The {@link IHARebuildRequest} to replicate the backing file to + * the requesting service. + * @param sequence + * The sequence of this {@link IHAWriteMessage} (origin ZERO + * (0)). + * @param quorumToken + * The quorum token of the leader, which must remain valid across + * the rebuild protocol. + * @param fileExtent + * The file extent as of the moment that the leader begins to + * replicate the existing backing file. + * @param offset + * The starting offset (relative to the root blocks). + * @param nbytes + * The #of bytes to be sent. + * @param b + * The raw buffer. The buffer will be cleared and filled with the + * specified data, then sent down the write pipeline. + * + * @return The {@link Future} for that request. + * + * @throws IOException + * @throws InterruptedException + */ + Future<Void> sendRawBuffer(IHARebuildRequest req, + //long commitCounter, + //long commitTime, + long sequence, long quorumToken, long fileExtent, + long offset, int nbytes, ByteBuffer b) throws IOException, + InterruptedException; + + /** * Read from the local store in support of failover reads on nodes in a * highly available {@link Quorum}. * @@ -102,4 +142,20 @@ */ long getBlockSequence(); + /** + * Snapshot the allocators in preparation for computing a digest of the + * committed allocations. + * + * @return The snapshot in a format that is backing store specific. + */ + Object snapshotAllocators(); + + /** + * Compute the digest using the snapshot. + * + * @param snapshot + * The allocator snapshot. 
+ */ + void computeDigest(Object snapshot, MessageDigest digest); + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -29,6 +29,7 @@ import java.io.InputStream; import java.io.RandomAccessFile; import java.nio.ByteBuffer; +import java.security.MessageDigest; import java.util.UUID; import java.util.concurrent.Future; @@ -38,6 +39,7 @@ import com.bigdata.counters.CounterSet; import com.bigdata.ha.QuorumRead; import com.bigdata.ha.msg.IHALogRequest; +import com.bigdata.ha.msg.IHARebuildRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.IBufferAccess; import com.bigdata.mdi.IResourceMetadata; @@ -641,6 +643,30 @@ } + @Override + public Future<Void> sendRawBuffer(final IHARebuildRequest req, + // long commitCounter, long commitTime, + final long sequence, final long quorumToken, final long fileExtent, + final long offset, final int nbytes, final ByteBuffer b) + throws IOException, InterruptedException { + + return m_store.sendRawBuffer(req, /* commitCounter, commitTime, */ + sequence, quorumToken, fileExtent, offset, nbytes, b); + + } + + @Override + public Object snapshotAllocators() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException(); + } + + @Override + public void computeDigest(Object snapshot, MessageDigest digest) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException(); + } + public ByteBuffer readFromLocalStore(final long addr) throws InterruptedException { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -29,6 +29,7 @@ import java.nio.ByteBuffer; import java.nio.channels.Channel; import java.nio.channels.FileChannel; +import java.security.MessageDigest; import java.util.Map; import java.util.UUID; import java.util.concurrent.Future; @@ -46,7 +47,9 @@ import com.bigdata.ha.HAPipelineGlue; import com.bigdata.ha.QuorumPipeline; import com.bigdata.ha.QuorumRead; +import com.bigdata.ha.msg.HAWriteMessage; import com.bigdata.ha.msg.IHALogRequest; +import com.bigdata.ha.msg.IHARebuildRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.FileChannelUtility; import com.bigdata.io.IBufferAccess; @@ -2145,6 +2148,7 @@ } + @Override public void truncate(final long newExtent) { final long newUserExtent = newExtent - headerSize; @@ -2273,6 +2277,7 @@ * @todo why is this synchronized? the operation should be safe. maybe * against a concurrent close? */ + @Override synchronized public long transferTo(final RandomAccessFile out) throws IOException { @@ -2302,6 +2307,7 @@ * Note: The file is NOT closed and re-opened in a read-only mode in order * to avoid causing difficulties for concurrent readers. */ + @Override public void closeForWrites() { // sets the [readOnly] flag. 
@@ -2338,10 +2344,12 @@ * This implementation can not release storage allocations and invocations * of this method are ignored. */ + @Override public void delete(long addr) { // NOP } + @Override public void writeRawBuffer(final IHAWriteMessage msg, final IBufferAccess b) throws IOException, InterruptedException { @@ -2415,6 +2423,40 @@ return remoteWriteFuture; } + @Override + public Future<Void> sendRawBuffer(final IHARebuildRequest req, +// final long commitCounter, final long commitTime, + final long sequence, final long quorumToken, final long fileExtent, + final long offset, final int nbytes, final ByteBuffer b) + throws IOException, InterruptedException { + + // read direct from store + final ByteBuffer clientBuffer = b; + clientBuffer.position(0); + clientBuffer.limit(nbytes); + + readRaw(nbytes, offset, clientBuffer); + + assert clientBuffer.remaining() > 0 : "Empty buffer: " + clientBuffer; + + @SuppressWarnings("unchecked") + final QuorumPipeline<HAPipelineGlue> quorumMember = (QuorumPipeline<HAPipelineGlue>) quorum + .getMember(); + + final int chk = ChecksumUtility.threadChk.get().checksum(b); + + final IHAWriteMessage msg = new HAWriteMessage(-1L/* commitCounter */, + -1L/* commitTime */, sequence, nbytes, chk, StoreTypeEnum.WORM, + quorumToken, fileExtent, offset/* firstOffset */); + + final Future<Void> remoteWriteFuture = quorumMember.replicate(req, msg, + clientBuffer); + + return remoteWriteFuture; + + } + + @Override public void setExtentForLocalStore(final long extent) throws IOException, InterruptedException { @@ -2422,10 +2464,23 @@ } + @Override public void resetFromHARootBlock(final IRootBlockView rootBlock) { nextOffset.set(rootBlock.getNextOffset()); } + @Override + public Object snapshotAllocators() { + // TODO Auto-generated method stub + throw new UnsupportedOperationException(); + } + + @Override + public void computeDigest(Object snapshot, MessageDigest digest) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException(); + } + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -63,7 +63,9 @@ import com.bigdata.counters.striped.StripedCounters; import com.bigdata.ha.HAPipelineGlue; import com.bigdata.ha.QuorumPipeline; +import com.bigdata.ha.msg.HAWriteMessage; import com.bigdata.ha.msg.IHALogRequest; +import com.bigdata.ha.msg.IHARebuildRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.FileChannelUtility; import com.bigdata.io.IBufferAccess; @@ -83,6 +85,7 @@ import com.bigdata.journal.IHABufferStrategy; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.RootBlockView; +import com.bigdata.journal.StoreTypeEnum; import com.bigdata.quorum.Quorum; import com.bigdata.rawstore.IRawStore; import com.bigdata.service.AbstractTransactionService; @@ -5081,6 +5084,97 @@ } + /** + * @see IHABufferStrategy#sendRawBuffer(IHARebuildRequest, long, + * long, long, long, int, ByteBuffer) + */ + public Future<Void> sendRawBuffer(final IHARebuildRequest req, +// final long commitCounter, final long commitTime, + final long sequence, final long quorumToken, final long fileExtent, + final long offset, final int nbytes, final ByteBuffer b) + throws IOException, 
InterruptedException { + + // read direct from store + final ByteBuffer clientBuffer = b; + clientBuffer.position(0); + clientBuffer.limit(nbytes); + + readRaw(nbytes, offset, clientBuffer); + + assert clientBuffer.remaining() > 0 : "Empty buffer: " + clientBuffer; + + @SuppressWarnings("unchecked") + final QuorumPipeline<HAPipelineGlue> quorumMember = (QuorumPipeline<HAPipelineGlue>) m_quorum + .getMember(); + + final int chk = ChecksumUtility.threadChk.get().checksum(b); + + final IHAWriteMessage msg = new HAWriteMessage(-1L/* commitCounter */, + -1L/* commitTime */, sequence, nbytes, chk, StoreTypeEnum.RW, + quorumToken, fileExtent, offset/* firstOffset */); + + final Future<Void> remoteWriteFuture = quorumMember.replicate(req, msg, + clientBuffer); + + return remoteWriteFuture; + + } + + /** + * Read on the backing file. + * + * @param nbytes + * The #of bytes to read. + * @param offset + * The offset of the first byte (relative to the start of the + * data region). + * @param dst + * Where to put the data. Bytes will be written at position until + * limit. + * @return The caller's buffer, prepared for reading. + */ + private ByteBuffer readRaw(final int nbytes, final long offset, + final ByteBuffer dst) { + + final Lock readLock = m_extensionLock.readLock(); + readLock.lock(); + try { + + try { + + // the offset into the disk file. + final long pos = FileMetadata.headerSize0 + offset; + + // read on the disk. + final int ndiskRead = FileChannelUtility.readAll(m_reopener, + dst, pos); + + // update performance counters. + final StoreCounters<?> c = (StoreCounters<?>) storeCounters + .get().acquire(); + try { + c.ndiskRead += ndiskRead; + } finally { + c.release(); + } + + } catch (IOException ex) { + + throw new RuntimeException(ex); + + } + + // flip for reading. 
+ dst.flip(); + + return dst; + } finally { + + readLock.unlock(); + } + + } + public int getMaxBlobSize() { return m_maxBlobAllocSize-4; // allow for checksum } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -56,6 +56,8 @@ import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHALogRootBlocksRequest; import com.bigdata.ha.msg.IHALogRootBlocksResponse; +import com.bigdata.ha.msg.IHARebuildRequest; +import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.FileChannelUtility; @@ -185,7 +187,7 @@ } @Override - public Future<Void> receiveAndReplicate(final IHALogRequest req, + public Future<Void> receiveAndReplicate(final IHASyncRequest req, final IHAWriteMessage msg) throws IOException { return ((QuorumPipeline<HAPipelineGlue>) member) @@ -254,6 +256,12 @@ throw new UnsupportedOperationException(); } + @Override + public Future<Void> sendHAStore(IHARebuildRequest msg) + throws IOException { + throw new UnsupportedOperationException(); + } + } // class MockHAPipelineGlue /** @@ -288,7 +296,7 @@ addListener(this.pipelineImpl = new QuorumPipelineImpl<S>(this){ - protected void handleReplicatedWrite(final IHALogRequest req, + protected void handleReplicatedWrite(final IHASyncRequest req, final IHAWriteMessage msg, final ByteBuffer data) throws Exception { @@ -400,7 +408,7 @@ // } @Override - public Future<Void> receiveAndReplicate(final IHALogRequest req, + public Future<Void> receiveAndReplicate(final IHASyncRequest req, final IHAWriteMessage msg) throws IOException { return pipelineImpl.receiveAndReplicate(req, msg); @@ -408,7 +416,7 @@ } @Override - public Future<Void> replicate(IHALogRequest req, + public Future<Void> replicate(IHASyncRequest req, final IHAWriteMessage msg, final ByteBuffer b) throws IOException { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -46,7 +46,7 @@ import com.bigdata.ha.HAGlue; import com.bigdata.ha.QuorumService; import com.bigdata.ha.QuorumServiceBase; -import com.bigdata.ha.msg.IHALogRequest; +import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.AbstractJournalTestCase; @@ -447,7 +447,7 @@ * WriteCache and let that lay it down onto the disk. 
*/ @Override - protected void handleReplicatedWrite(IHALogRequest req, + protected void handleReplicatedWrite(IHASyncRequest req, IHAWriteMessage msg, ByteBuffer data) throws Exception { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -59,6 +59,8 @@ import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHALogRootBlocksRequest; import com.bigdata.ha.msg.IHALogRootBlocksResponse; +import com.bigdata.ha.msg.IHARebuildRequest; +import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.quorum.MockQuorumFixture.MockQuorum.MockQuorumWatcher; import com.bigdata.util.InnerCause; @@ -1285,7 +1287,7 @@ } @Override - public Future<Void> receiveAndReplicate(final IHALogRequest req, + public Future<Void> receiveAndReplicate(final IHASyncRequest req, IHAWriteMessage msg) throws IOException { throw new UnsupportedOperationException(); } @@ -1302,6 +1304,12 @@ throw new UnsupportedOperationException(); } + @Override + public Future<Void> sendHAStore(IHARebuildRequest msg) + throws IOException { + throw new UnsupportedOperationException(); + } + } // MockService } // MockQuorumMember Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -28,6 +28,7 @@ import java.io.IOException; import java.io.Serializable; import java.net.InetSocketAddress; +import java.nio.ByteBuffer; import java.rmi.Remote; import java.rmi.server.ExportException; import java.util.Properties; @@ -55,13 +56,16 @@ import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHALogRootBlocksRequest; import com.bigdata.ha.msg.IHALogRootBlocksResponse; +import com.bigdata.ha.msg.IHARebuildRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.IBufferAccess; import com.bigdata.io.writecache.WriteCache; import com.bigdata.journal.BufferMode; +import com.bigdata.journal.FileMetadata; import com.bigdata.journal.IHABufferStrategy; import com.bigdata.journal.IRootBlockView; +import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; import com.bigdata.journal.ValidationError; import com.bigdata.journal.WORMStrategy; @@ -223,6 +227,18 @@ return haLogWriter; } + + /** + * {@inheritDoc} + * <p> + * Overridden to strengthen the return type. + */ + @Override + public IHABufferStrategy getBufferStrategy() { + + return (IHABufferStrategy) super.getBufferStrategy(); + + } public HAJournal(final Properties properties) { @@ -550,11 +566,11 @@ try { while (r.hasMoreBuffers()) { - - // IHABufferStrategy - final IHABufferStrategy strategy = (IHABufferStrategy) HAJournal.this - .getBufferStrategy(); - + + // IHABufferStrategy + final IHABufferStrategy strategy = HAJournal.this + .getBufferStrategy(); + // get message and fill write cache buffer (unless WORM). 
final IHAWriteMessage msg = r.processNextBuffer(buf.buffer()); @@ -591,7 +607,154 @@ } + /* + * FIXME REBUILD: Take a read lock and send everything from the backing + * file, but do not include the root blocks. The first buffer can be + * short (to exclude the root blocks). That will put the rest of the + * buffers on a 1MB boundary which will provide more efficient IOs. + */ @Override + public Future<Void> sendHAStore(final IHARebuildRequest req) + throws IOException { + + if (haLog.isDebugEnabled()) + haLog.debug("req=" + req); + + // Task sends an HALog file along the pipeline. + final FutureTask<Void> ft = new FutureTaskMon<Void>( + new SendStoreTask(req)); + + // Run task. + getExecutorService().submit(ft); + + // Return *ASYNCHRONOUS* proxy (interruptable). + return getProxy(ft, true/* asynch */); + + } + + /** + * Class sends the backing file along the write pipeline. + */ + private class SendStoreTask implements Callable<Void> { + + private final IHARebuildRequest req; + + public SendStoreTask(final IHARebuildRequest req) { + + if(req == null) + throw new IllegalArgumentException(); + + this.req = req; + + } + + public Void call() throws Exception { + + // The quorum token (must remain valid through this operation). + final long quorumToken = getQuorumToken(); + + // Grab a read lock. + final long txId = newTx(ITx.READ_COMMITTED); + IBufferAccess buf = null; + try { + + try { + // Acquire a buffer. + buf = DirectBufferPool.INSTANCE.acquire(); + } catch (InterruptedException ex) { + // Wrap and re-throw. + throw new IOException(ex); + } + + // The backing ByteBuffer. + final ByteBuffer b = buf.buffer(); + + // The capacity of that buffer (typically 1MB). + final int bufferCapacity = b.capacity(); + + // The size of the root blocks (which we skip). + final int headerSize = FileMetadata.headerSize0; + + /* + * The size of the file at the moment we begin. We will not + * replicate data on new extensions of the file. Those data will + * be captured by HALog files that are replayed by the service + * that is doing the rebuild. + */ + final long fileExtent = getBufferStrategy().getExtent(); + + // The #of bytes to be transmitted. + final long totalBytes = fileExtent - headerSize; + + // The #of bytes remaining. + long remaining = totalBytes; + + // The offset (relative to the root blocks). + long offset = 0L; + + long sequence = 0L; + + if (log.isInfoEnabled()) + log.info("Sending store file: nbytes=" + totalBytes); + + while (remaining > 0) { + + int nbytes = (int) Math.min((long) bufferCapacity, + remaining); + + if (sequence == 0L && nbytes == bufferCapacity + && remaining > bufferCapacity) { + + /* + * Adjust the first block so the remainder will be + * aligned on the bufferCapacity boundaries (IO + * efficiency). + */ + nbytes -= headerSize; + + } + + if (log.isDebugEnabled()) + log.debug("Sending block: sequence=" + sequence + + ", offset=" + offset + ", nbytes=" + nbytes); + + getBufferStrategy().sendRawBuffer(req, sequence, + quorumToken, fileExtent, offset, nbytes, b); + + remaining -= nbytes; + + sequence++; + + } + + if (log.isInfoEnabled()) + log.info("Sent store file: #blocks=" + sequence + + ", #bytes=" + (fileExtent - headerSize)); + + // Done. + return null; + + } finally { + + if (buf != null) { + try { + // Release the direct buffer. + buf.release(); + } catch (InterruptedException e) { + log.warn(e); + } + } + + // Release the read lock. 
+ abort(txId); + + } + + } + + } // class SendStoreTask + + @Override public Future<Void> bounceZookeeperConnection() { final FutureTask<Void> ft = new FutureTaskMon<Void>(new Runnable() { @SuppressWarnings("rawtypes") Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-31 12:42:03 UTC (rev 6695) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-10-31 17:08:43 UTC (rev 6696) @@ -49,9 +49,12 @@ import com.bigdata.ha.halog.HALogWriter; import com.bigdata.ha.msg.HALogRequest; import com.bigdata.ha.msg.HALogRootBlocksRequest; +import com.bigdata.ha.msg.HARebuildRequest; import com.bigdata.ha.msg.HARootBlockRequest; import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHALogRootBlocksResponse; +import com.bigdata.ha.msg.IHARootBlockResponse; +import com.bigdata.ha.msg.IHASyncRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.IBufferAccess; import com.bigdata.io.writecache.WriteCache; @@ -950,11 +953,53 @@ journal.doLocalAbort(); /* - * FIXME REBUILD : Implement logic to copy all data from - * the leader's journal (except the root block) and then - * apply the HA Log files up to the commit point pinned - * by the readLock. + * Make a note of the root block that is in effect when we being + * the rebuild procedure. Once we have replicated the backing + * store from the leader, we will need to replay all HALog files + * starting with commitCounter+1 for this root block. Once we + * catch up, we can atomically join and lay down the root blocks + * from the leader for the most recent commit point. */ + final IHARootBlockResponse rootBlockAtStartOfRebuild = leader + .getRootBlock(new HARootBlockRequest(null/* storeUUID */)); + + /* + * Replicate the backing store of the leader. + * + * Note: We are holding a read lock, so committed allocations + * will not be overwritten. However, the replicated backing file + * will not be logically consistent until we apply all HALog + * files since the commit point noted above. + * + *... [truncated message content] |
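The alignment trick inside SendStoreTask above, where the first payload is shortened by the root-block header size so that every subsequent block starts on a buffer-capacity boundary of the backing file, can be exercised in isolation. The sketch below reproduces only that chunking arithmetic and prints the (sequence, offset, nbytes) schedule rather than calling sendRawBuffer(); the headerSize, bufferCapacity and fileExtent values and the RebuildChunkSchedule class name are made-up stand-ins for illustration (the real code takes them from FileMetadata.headerSize0, the DirectBufferPool buffer, and the live store extent). Note that the sketch advances offset on every pass, which a complete transfer loop needs.

public class RebuildChunkSchedule {

    public static void main(final String[] args) {

        // Illustrative stand-ins for FileMetadata.headerSize0 (magic + version
        // + two root blocks) and the typical 1MB direct buffer capacity.
        final int headerSize = 8 + 2 * 340;
        final int bufferCapacity = 1024 * 1024;

        // Pretend store size at the moment the rebuild begins.
        final long fileExtent = 10L * bufferCapacity + headerSize + 12345L;

        // Everything after the root blocks is replicated.
        final long totalBytes = fileExtent - headerSize;

        long remaining = totalBytes;
        long offset = 0L; // offset relative to the end of the root blocks.
        long sequence = 0L;

        while (remaining > 0) {

            int nbytes = (int) Math.min((long) bufferCapacity, remaining);

            if (sequence == 0L && nbytes == bufferCapacity
                    && remaining > bufferCapacity) {

                /*
                 * Shorten the first block so that all subsequent blocks start
                 * at a multiple of bufferCapacity in the backing file (the
                 * file position of a block is headerSize + offset).
                 */
                nbytes -= headerSize;

            }

            System.out.println("sequence=" + sequence + ", offset=" + offset
                    + ", nbytes=" + nbytes);

            remaining -= nbytes;
            offset += nbytes;
            sequence++;

        }

        System.out.println("#blocks=" + sequence + ", #bytes=" + totalBytes);

    }

}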
From: <tho...@us...> - 2012-11-06 16:53:54
Revision: 6701 http://bigdata.svn.sourceforge.net/bigdata/?rev=6701&view=rev Author: thompsonbry Date: 2012-11-06 16:53:42 +0000 (Tue, 06 Nov 2012) Log Message: ----------- - Setup CI for HA. There are now unit tests that cover many of the common scenarios. Several issues were addressed related to reliable HAJournalServer shutdown. The BTC 2012 FOAF data sets were added for up to three degrees of separation. A known problem exists with a data race for concurrent create of the default KB and concurrent discovery of the default KB. The AbstractHAJournalServerTestCase.awaitKBCreate() method documents the problem and includes a partial workaround (sleeping a bit before and after each discovery attempt). A unit test has been created that replicates the problem and also shows two additional ways in which this data race can fail. That test is incorporated into CI with this commit. In order to commit these tests, I need to enable the HALog files. This is controlled by a static boolean in HAJournalServer (HA_LOG_ENABLED). Those HALog files are used to resynchronize disconnected services. They are generated for every write set and purged at time the quorum is fully met when it goes through a 2-phase commit. If you update, you will notice these log files the next time you load data through a quorum. If you only have 2 services and a quorum replication factor of 3, then the HALog files WILL NOT be deleted since the quorum will never be fully met. This could significantly increase your storage requirements for people testing with a quorum having only 2 services. In order to restore the previous behavior, simply set HAJournalServer. HA_LOG_ENABLED := false in the code. The HALog files will allow you to start the 3rd service and achieve a fully met quorum (3 out of 3 services are fully synchronized). However, we are still working through some known issues with respect to the atomic decision for the 3rd service to join the met quorum. We are also working through the logic for a disaster recovery of a service through a full replication and rebuild from the existing quorum members. Until we get this all worked out, you can restore the existing experience by setting HAJournalServer.HA_LOG_ENABLED := false. - Added HAGlue method to suspend writes while taking snapshot backup. The method returns Future. Writes resume when Future is cancelled. This provides an exclusive low level write lock on the Journal. This gives you an opportunity to take a snapshot backup. You would then cancel the global write lock when the backup was complete. The quorum will remain available to readers during the backup procedure. Obviously, obtaining and releasing this lock needs to be coordinated with the backup mechanism. A simple Java program could be written to obtain the lock and then yield it when signaled by the completion of the backup software. - We have been looking at an internal http proxy for the NSS that would proxy all write requests against any NSS instance to the quorum leader and would proxy read requests against an NSS node that was not synchronized with the quorum to a node that was synchronized with the quorum. This proxy pattern would insulate clients from any awareness of the state of the individual NSS instances. However, we have not yet implemented this proxy pattern. We have some notes on how to support incremental online backup (versus snapshot backup) using the write replication mechanisms, but nothing has been implemented to support this. 
Likewise, we have some notes on logging http mutation requests to a write ahead log that could be used to replay transactions against a quorum. However, again, nothing has been implemented here. - Modified BigdataStatics to log more lines on process startup to stdout. The old default was only showing the banner information. - BytesUtil - added variants on toArray(ByteBuffer) that can reuse the caller's array. - HAGlue - added methods to compute digests for the Journal and the HALog files; added method to request and cancel a global write lock for use in coordinating backups; added method exposing the RunState of the HAJournalServer. - AbstractQuorumTestCase.assertCondition(...) - moved methods into shared test class (Test3). These methods are now reused by the HA CI test suite. - Javadoc on IServiceListener. - ProcessHelper.getEnvironment() - now writes the environment using "export" for non-Windows platforms. This should not cause a conflict as nothing relied on the generated startup files - they were there for debugging. - Moved the CreateKBTask into the same package as the BigdataSail. Added a test suite for problems with concurrent create and discovery. This test suite demonstrates problems that must be fixed. - The NSS status page now shows the services in pipeline order and provides links to those services for click through. - Moved some methods from RemoteRepository (for the NSS) into ConnectOptions to facilitate reuse in the CI HA test suite. @see https://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/BigdataStatics.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/BytesUtil.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/TestCase3.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/quorum/AbstractQuorumTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/jini/start/IServiceListener.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/jini/start/ServiceStarter.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/jini/start/config/ServiceConfiguration.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/jini/start/process/ProcessHelper.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 
branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/jini/TestAll.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/quorum/zk/AbstractZkQuorumTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/service/jini/util/JiniServicesHelper.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/zookeeper/AbstractZooTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithSids.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/RunState.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HADigestRequest.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HADigestResponse.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HAGlobalWriteLockRequest.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogDigestRequest.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogDigestResponse.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHADigestRequest.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHADigestResponse.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHAGlobalWriteLockRequest.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogDigestRequest.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogDigestResponse.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java 
branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerGlobalWriteLock.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/jiniClient.config branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/resources/data/foaf/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/resources/data/foaf/README.txt branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/resources/data/foaf/data-0.nq.gz branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/resources/data/foaf/data-1.nq.gz branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/resources/data/foaf/data-2.nq.gz branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/resources/data/foaf/data-3.nq.gz branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/CreateKBTask.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestConcurrentKBCreate.java Removed Paths: ------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/ha/ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CreateKBTask.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/BigdataStatics.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -51,10 +51,14 @@ * easy to track down why a child process dies during service start. If you * want to see more output from the child process, then you should set the * log level for the {@link ProcessHelper} class to INFO. + * <p> + * Note: This needs to be more than the length of the {@link Banner} output + * in order for anything related to the process behavior to be echoed on + * {@link System#out}. * * @see ProcessHelper */ - public static int echoProcessStartupLineCount = 20; + public static int echoProcessStartupLineCount = 100;//Integer.MAX_VALUE;//100 /** * Global switch controlling whether true thread local buffers or striped Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/BytesUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/BytesUtil.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/BytesUtil.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -1455,37 +1455,47 @@ */ static public byte[] toArray(final ByteBuffer b) { - return toArray(b, false/* forceCopy */); + return toArray(b, false/* forceCopy */, null/* dst */); } - /** - * Return a byte[] having the data in the {@link ByteBuffer} from the - * {@link ByteBuffer#position()} to the {@link ByteBuffer#limit()}. The - * position, limit, and mark are not affected by this operation. - * <p> - * Under certain circumstances it is possible and may be desirable to return - * the backing {@link ByteBuffer#array}. This behavior is enabled by - * <code>forceCopy := false</code>. - * <p> - * It is possible to return the backing byte[] when the {@link ByteBuffer} - * has a backing array, the array offset is ZERO (0), and the - * {@link ByteBuffer#limit()} is equal to the {@link ByteBuffer#capacity()} - * then the backing array is returned. 
Otherwise, a new byte[] must be - * allocated, and the data are copied into that byte[], which may then be - * returned. - * - * @param b - * The {@link ByteBuffer}. - * @param forceCopy - * When <code>false</code>, the backing array will be returned if - * possible. - * - * @return The byte[]. - */ - static public byte[] toArray(final ByteBuffer b, final boolean forceCopy) { + /** + * Return a byte[] having the data in the {@link ByteBuffer} from the + * {@link ByteBuffer#position()} to the {@link ByteBuffer#limit()}. The + * position, limit, and mark are not affected by this operation. + * <p> + * Under certain circumstances it is possible and may be desirable to return + * the backing {@link ByteBuffer#array}. This behavior is enabled by + * <code>forceCopy := false</code>. + * <p> + * It is possible to return the backing byte[] when the {@link ByteBuffer} + * has a backing array, the array offset is ZERO (0), and the + * {@link ByteBuffer#limit()} is equal to the {@link ByteBuffer#capacity()} + * then the backing array is returned. Otherwise, a new byte[] must be + * allocated, and the data are copied into that byte[], which may then be + * returned. + * + * @param b + * The {@link ByteBuffer}. + * @param forceCopy + * When <code>false</code>, the backing array will be returned if + * possible. + * @param dst + * A byte[] provided by the caller (optional). When non- + * <code>null</code> and having a length GTE + * {@link ByteBuffer#remaining()}, this array will be preferred + * to a newly allocated array. + * + * @return The byte[] having the data. When <i>dst</i> is non- + * <code>null</code> this MAY be the caller's array. When it is the + * caller's array, it MAY be larger than the #of bytes actually + * read. + */ + static public byte[] toArray(final ByteBuffer b, final boolean forceCopy, + final byte[] dst) { - if (b.hasArray() && b.arrayOffset() == 0 && b.position() == 0) { + if (!forceCopy && b.hasArray() && b.arrayOffset() == 0 + && b.position() == 0) { // && b.limit() == b.capacity() @@ -1507,9 +1517,10 @@ final int len = tmp.remaining(); - final byte[] a = new byte[len]; + final byte[] a = dst != null && dst.length >= len ? dst : new byte[len]; - tmp.get(a); + // Transfer only the available bytes. + tmp.get(a, 0, len); return a; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlue.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -23,14 +23,24 @@ */ package com.bigdata.ha; +import java.io.FileNotFoundException; import java.io.IOException; import java.rmi.Remote; +import java.security.DigestException; +import java.security.NoSuchAlgorithmException; import java.util.concurrent.Future; +import java.util.concurrent.TimeoutException; +import com.bigdata.ha.msg.IHADigestRequest; +import com.bigdata.ha.msg.IHADigestResponse; +import com.bigdata.ha.msg.IHAGlobalWriteLockRequest; +import com.bigdata.ha.msg.IHALogDigestRequest; +import com.bigdata.ha.msg.IHALogDigestResponse; import com.bigdata.ha.msg.IHARootBlockRequest; import com.bigdata.ha.msg.IHARootBlockResponse; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.ITransactionService; +import com.bigdata.journal.Journal; import com.bigdata.service.IService; /** @@ -56,7 +66,7 @@ * * @todo Move to an HAAdminGlue interface? 
*/ - + /** * This method may be issued to force the service to close and then reopen * its zookeeper connection. This is a drastic action which will cause all @@ -98,4 +108,64 @@ */ int getNSSPort() throws IOException; + /** + * The {@link RunState} of the service. + */ + RunState getRunState() throws IOException; + + /** + * Compute the digest of the backing store - <strong>THIS METHOD IS ONLY FOR + * DIAGNOSTIC PURPOSES.</strong> + * <p> + * The digest is useless if there are concurrent writes since it can not be + * meaningfully compared with the digest of another store unless both stores + * are known to be stable. + */ + IHADigestResponse computeDigest(IHADigestRequest req) throws IOException, + NoSuchAlgorithmException, DigestException; + + /** + * Compute the digest of a HALog file - <strong>THIS METHOD IS ONLY FOR + * DIAGNOSTIC PURPOSES.</strong> + * <p> + * The digest is useless if there are concurrent writes since it can not be + * meaningfully compared with the digest of another store unless both stores + * are known to be stable. + * + * @throws FileNotFoundException + * if the HALog for the specified commit point does not exist. + */ + IHALogDigestResponse computeHALogDigest(IHALogDigestRequest req) throws IOException, + NoSuchAlgorithmException, DigestException; + + /** + * Obtain a global write lock on the leader. The lock only blocks writers. + * Readers may continue to execute without delay. + * <p> + * You can not obtain a coherent backup of the {@link Journal} while there + * are concurrent write operations. This method may be used to coordinate + * full backups of the {@link Journal} by suspending low level writes on the + * backing file. + * <p> + * This method will block until the lock is held, the lock request is + * interrupted, or the lock request timeout expires. + * + * @param req + * The request. + * + * @return A {@link Future} for the lock. The lock may be released by + * canceling the {@link Future}. The lock is acquired before this + * method returns and is held while the {@link Future} is running. + * If the {@link Future#isDone()} then the lock is no longer held. + * + * @throws IOException + * if there is an RMI problem. + * @throws TimeoutException + * if a timeout expires while awaiting the global lock. + * @throws InterruptedException + * if interrupted while awaiting the lock. 
+ */ + Future<Void> globalWriteLock(IHAGlobalWriteLockRequest req) + throws IOException, TimeoutException, InterruptedException; + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAGlueDelegate.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -26,12 +26,20 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.rmi.RemoteException; +import java.security.DigestException; +import java.security.NoSuchAlgorithmException; import java.util.UUID; import java.util.concurrent.Future; +import java.util.concurrent.TimeoutException; import com.bigdata.ha.msg.IHA2PhaseAbortMessage; import com.bigdata.ha.msg.IHA2PhaseCommitMessage; import com.bigdata.ha.msg.IHA2PhasePrepareMessage; +import com.bigdata.ha.msg.IHADigestRequest; +import com.bigdata.ha.msg.IHADigestResponse; +import com.bigdata.ha.msg.IHAGlobalWriteLockRequest; +import com.bigdata.ha.msg.IHALogDigestRequest; +import com.bigdata.ha.msg.IHALogDigestResponse; import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHALogRootBlocksRequest; import com.bigdata.ha.msg.IHALogRootBlocksResponse; @@ -185,8 +193,31 @@ } @Override + public RunState getRunState() throws IOException { + return delegate.getRunState(); + } + + @Override public Future<Void> sendHAStore(IHARebuildRequest msg) throws IOException { return delegate.sendHAStore(msg); } + @Override + public IHADigestResponse computeDigest(final IHADigestRequest req) + throws IOException, NoSuchAlgorithmException, DigestException { + return delegate.computeDigest(req); + } + + @Override + public IHALogDigestResponse computeHALogDigest(final IHALogDigestRequest req) + throws IOException, NoSuchAlgorithmException, DigestException { + return delegate.computeHALogDigest(req); + } + + @Override + public Future<Void> globalWriteLock(final IHAGlobalWriteLockRequest req) + throws IOException, TimeoutException, InterruptedException { + return delegate.globalWriteLock(req); + } + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -197,31 +197,33 @@ */ Future<Void> sendHAStore(IHARebuildRequest msg) throws IOException; - /** - * TODO Method to compute a digest for the committed allocations on a - * backing store as of the commit point on which the specified transaction - * is reading. This may be used to verify that the backing stores are - * logically consistent even when they may have some discarded writes that - * are not present on all stores (from aborted write sets). - * <p> - * The caller can get the root blocks for the commit counter associated with - * the txId (if we can the readsOnCommitTime). - * <p> - * The RWStore needs to snapshot the allocators while holding the allocation - * lock and that snapshot MUST be for the same commit point. Therefore, this - * operation needs to be submitted by the leader in code that can guarantee - * that the leader does not go through a commit point. 
The snapshots should - * be stored at the nodes, not shipped to the leader. The leader therefore - * needs to know when each snapshot is ready so it can exit the critical - * code and permit additional writes (releasing the allocLock or the - * semaphore on the journal). - * - * <pre> - * IHAStoreChecksumRequest {storeUUID, txid (of readLock)} - * - * IHAStoreCheckSumResponse {MD5Digest} - * </pre> - */ +// /** +// * There is something for this on HAGlue right now. +// * +// * TODO Method to compute a digest for the committed allocations on a +// * backing store as of the commit point on which the specified transaction +// * is reading. This may be used to verify that the backing stores are +// * logically consistent even when they may have some discarded writes that +// * are not present on all stores (from aborted write sets). +// * <p> +// * The caller can get the root blocks for the commit counter associated with +// * the txId (if we can the readsOnCommitTime). +// * <p> +// * The RWStore needs to snapshot the allocators while holding the allocation +// * lock and that snapshot MUST be for the same commit point. Therefore, this +// * operation needs to be submitted by the leader in code that can guarantee +// * that the leader does not go through a commit point. The snapshots should +// * be stored at the nodes, not shipped to the leader. The leader therefore +// * needs to know when each snapshot is ready so it can exit the critical +// * code and permit additional writes (releasing the allocLock or the +// * semaphore on the journal). +// * +// * <pre> +// * IHAStoreChecksumRequest {storeUUID, txid (of readLock)} +// * +// * IHAStoreCheckSumResponse {MD5Digest} +// * </pre> +// */ // Future<IHASnapshotDigestResponse> computeSnapshotDigest(IHASnapshotDigestRequest req) throws IOException; } Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/RunState.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/RunState.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/RunState.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,42 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha; + +/** + * Service run state enumeration. 
+ */ +public enum RunState { + + Start(0), Running(1), ShuttingDown(2), Shutdown(3); + + private RunState(final int level) { + + this.level = level; + + } + + @SuppressWarnings("unused") + private final int level; + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -31,19 +31,20 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.security.DigestException; +import java.security.MessageDigest; import org.apache.log4j.Logger; +import com.bigdata.btree.BytesUtil; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.FileChannelUtility; import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; -import com.bigdata.journal.IHABufferStrategy; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.RootBlockUtility; import com.bigdata.journal.StoreTypeEnum; -import com.bigdata.journal.WORMStrategy; import com.bigdata.util.ChecksumError; import com.bigdata.util.ChecksumUtility; @@ -487,4 +488,101 @@ } + @Override + public void computeDigest(final MessageDigest digest) + throws DigestException, IOException { + + computeDigest(reopener, digest); + + } + + static void computeDigest(final IReopenChannel<FileChannel> reopener, + final MessageDigest digest) throws DigestException, IOException { + + IBufferAccess buf = null; + try { + + try { + // Acquire a buffer. + buf = DirectBufferPool.INSTANCE.acquire(); + } catch (InterruptedException ex) { + // Wrap and re-throw. + throw new IOException(ex); + } + + // The backing ByteBuffer. + final ByteBuffer b = buf.buffer(); + + // A byte[] with the same capacity as that ByteBuffer. + final byte[] a = new byte[b.capacity()]; + + // The capacity of that buffer (typically 1MB). + final int bufferCapacity = b.capacity(); + + // The size of the file at the moment we begin. + final long fileExtent = reopener.reopenChannel().size(); + + // The #of bytes whose digest will be computed. + final long totalBytes = fileExtent; + + // The #of bytes remaining. + long remaining = totalBytes; + + // The offset. + long offset = 0L; + + // The block sequence. + long sequence = 0L; + + if (log.isInfoEnabled()) + log.info("Computing digest: nbytes=" + totalBytes); + + while (remaining > 0) { + + final int nbytes = (int) Math.min((long) bufferCapacity, + remaining); + + if (log.isDebugEnabled()) + log.debug("Computing digest: sequence=" + sequence + + ", offset=" + offset + ", nbytes=" + nbytes); + + // Setup for read. + b.position(0); + b.limit(nbytes); + + // read block + FileChannelUtility.readAll(reopener, b, offset); + + // Copy data into our byte[]. + final byte[] c = BytesUtil.toArray(b, false/* forceCopy */, a); + + // update digest + digest.digest(c, 0/* off */, nbytes/* len */); + + remaining -= nbytes; + + } + + if (log.isInfoEnabled()) + log.info("Computed digest: #blocks=" + sequence + ", #bytes=" + + totalBytes); + + // Done. + return; + + } finally { + + if (buf != null) { + try { + // Release the direct buffer. 
+ buf.release(); + } catch (InterruptedException e) { + log.warn(e); + } + } + + } + + } + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -29,12 +29,13 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.security.DigestException; +import java.security.MessageDigest; import java.util.Formatter; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.log4j.Logger; -import org.eclipse.jetty.util.log.Log; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.FileChannelUtility; @@ -876,5 +877,13 @@ } } + @Override + public void computeDigest(MessageDigest digest) throws DigestException, + IOException { + + HALogReader.computeDigest(m_state.reopener, digest); + + } + } } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -25,6 +25,8 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.security.DigestException; +import java.security.MessageDigest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.DirectBufferPool; @@ -81,4 +83,11 @@ */ IHAWriteMessage processNextBuffer(final ByteBuffer clientBuffer) throws IOException; + /** + * Compute the digest. If the {@link IHALogReader} is backed by the live + * HALog, then only the digest of the then current extent will be computed. + */ + void computeDigest(MessageDigest digest) throws DigestException, + IOException; + } Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HADigestRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HADigestRequest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HADigestRequest.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,48 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +import java.io.Serializable; +import java.util.UUID; + +public class HADigestRequest implements IHADigestRequest, Serializable { + + private static final long serialVersionUID = 1L; + + private final UUID storeUUID; + + public HADigestRequest(final UUID storeUUID) { + + this.storeUUID = storeUUID; + + } + + @Override + public UUID getStoreUUID() { + + return storeUUID; + + } + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HADigestResponse.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HADigestResponse.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HADigestResponse.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,58 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +import java.io.Serializable; +import java.util.UUID; + +public class HADigestResponse implements IHADigestResponse, Serializable { + + private static final long serialVersionUID = 1L; + + private final UUID storeUUID; + private final byte[] digest; + + public HADigestResponse(final UUID storeUUID, final byte[] digest) { + + this.storeUUID = storeUUID; + + this.digest = digest; + + } + + @Override + public UUID getStoreUUID() { + + return storeUUID; + + } + + @Override + public byte[] getDigest() { + + return digest; + + } + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HAGlobalWriteLockRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HAGlobalWriteLockRequest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HAGlobalWriteLockRequest.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,85 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +import java.io.Serializable; +import java.util.concurrent.TimeUnit; + +public class HAGlobalWriteLockRequest implements IHAGlobalWriteLockRequest, + Serializable { + + /** + * + */ + private static final long serialVersionUID = 1L; + + private final long lockWaitTimeout; + private final TimeUnit lockWaitUnits; + private final long lockHoldTimeout; + private final TimeUnit lockHoldUnits; + + public HAGlobalWriteLockRequest(final long lockWaitTimeout, + final TimeUnit lockWaitUnits, final long lockHoldTimeout, + final TimeUnit lockHoldUnits) { + + if (lockWaitTimeout <= 0) + throw new IllegalArgumentException(); + + if (lockHoldTimeout <= 0) + throw new IllegalArgumentException(); + + if (lockWaitUnits == null) + throw new IllegalArgumentException(); + + if (lockHoldUnits == null) + throw new IllegalArgumentException(); + + this.lockWaitTimeout = lockWaitTimeout; + this.lockHoldTimeout = lockHoldTimeout; + this.lockWaitUnits = lockWaitUnits; + this.lockHoldUnits = lockHoldUnits; + + } + + @Override + public long getLockWaitTimeout() { + return lockWaitTimeout; + } + + @Override + public TimeUnit getLockWaitUnits() { + return lockWaitUnits; + } + + @Override + public long getLockHoldTimeout() { + return lockHoldTimeout; + } + + @Override + public TimeUnit getLockHoldUnits() { + return lockHoldUnits; + } + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogDigestRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogDigestRequest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogDigestRequest.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,48 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +import java.io.Serializable; +import java.util.UUID; + +public class HALogDigestRequest implements IHALogDigestRequest, Serializable { + + private static final long serialVersionUID = 1L; + + private final long commitCounter; + + public HALogDigestRequest(final long commitCounter) { + + this.commitCounter = commitCounter; + + } + + @Override + public long getCommitCounter() { + + return commitCounter; + + } + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogDigestResponse.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogDigestResponse.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/HALogDigestResponse.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,57 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +import java.io.Serializable; + +public class HALogDigestResponse implements IHALogDigestResponse, Serializable { + + private static final long serialVersionUID = 1L; + + private final long commitCounter; + private final byte[] digest; + + public HALogDigestResponse(final long commitCounter, final byte[] digest) { + + this.commitCounter = commitCounter; + + this.digest = digest; + + } + + @Override + public long getCommitCounter() { + + return commitCounter; + + } + + @Override + public byte[] getDigest() { + + return digest; + + } + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHADigestRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHADigestRequest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHADigestRequest.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,46 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +import java.util.UUID; + +import com.bigdata.rawstore.IRawStore; + +/** + * Message used to request the digest of a backing store. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public interface IHADigestRequest extends IHAMessage { + + /** + * The {@link UUID} identifying the {@link IRawStore} for which the record + * was requested (optional, defaults to the current Journal). + * <p> + * Note: This parameter is intended for scale-out if there is a need to + * fetch the root block of a historical journal (versus the live journal). + */ + UUID getStoreUUID(); + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHADigestResponse.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHADigestResponse.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHADigestResponse.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,51 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +import java.util.UUID; + +import com.bigdata.rawstore.IRawStore; + +/** + * Message used to communicate the digest of a backing store. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public interface IHADigestResponse extends IHAMessage { + + /** + * The {@link UUID} identifying the {@link IRawStore} for which the record + * was requested (optional, defaults to the current Journal). + * <p> + * Note: This parameter is intended for scale-out if there is a need to + * fetch the root block of a historical journal (versus the live journal). + */ + UUID getStoreUUID(); + + /** + * The computed disgest. + */ + byte[] getDigest(); + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHAGlobalWriteLockRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHAGlobalWriteLockRequest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHAGlobalWriteLockRequest.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,55 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +import java.util.concurrent.TimeUnit; + +/** + * Message requesting a global write lock. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public interface IHAGlobalWriteLockRequest extends IHAMessage { + + /** + * The maximum amount of time to wait for the lock. + */ + long getLockWaitTimeout(); + + /** + * The units for the timeout. + */ + TimeUnit getLockWaitUnits(); + + /** + * The maximum amount of time to hold the lock. + */ + long getLockHoldTimeout(); + + /** + * The units for the timeout. + */ + TimeUnit getLockHoldUnits(); + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogDigestRequest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogDigestRequest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogDigestRequest.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,41 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + + +/** + * Message used to request the digest of the HALog file associated with + * a specified commit point. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public interface IHALogDigestRequest extends IHAMessage { + + /** + * The commit counter for the closing root block of the requested HALog + * file. + */ + long getCommitCounter(); + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogDigestResponse.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogDigestResponse.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/ha/msg/IHALogDigestResponse.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -0,0 +1,45 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.ha.msg; + +/** + * Message used to communicate the digest of an HALog file associated with + * a specific commit point. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public interface IHALogDigestResponse extends IHAMessage { + + /** + * The commit counter for the closing root block of the requested HALog + * file. + */ + long getCommitCounter(); + + /** + * The computed disgest. + */ + byte[] getDigest(); + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -35,6 +35,8 @@ import java.nio.channels.Channel; import java.nio.channels.FileChannel; import java.rmi.RemoteException; +import java.security.DigestException; +import java.security.NoSuchAlgorithmException; import java.util.Arrays; import java.util.Iterator; import java.util.LinkedHashSet; @@ -48,6 +50,7 @@ import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -87,12 +90,18 @@ import com.bigdata.counters.Instrument; import com.bigdata.ha.HAGlue; import com.bigdata.ha.QuorumService; +import com.bigdata.ha.RunState; import com.bigdata.ha.msg.HAReadResponse; import com.bigdata.ha.msg.HARootBlockRequest; import com.bigdata.ha.msg.HARootBlockResponse; import com.bigdata.ha.msg.IHA2PhaseAbortMessage; import com.bigdata.ha.msg.IHA2PhaseCommitMessage; import com.bigdata.ha.msg.IHA2PhasePrepareMessage; +import com.bigdata.ha.msg.IHADigestRequest; +import com.bigdata.ha.msg.IHADigestResponse; +import com.bigdata.ha.msg.IHAGlobalWriteLockRequest; +import com.bigdata.ha.msg.IHALogDigestRequest; +import com.bigdata.ha.msg.IHALogDigestResponse; import com.bigdata.ha.msg.IHALogRequest; import com.bigdata.ha.msg.IHALogRootBlocksRequest; import com.bigdata.ha.msg.IHALogRootBlocksResponse; @@ -4693,7 +4702,7 @@ throw new AssertionError(); } - + /* * Both a meet and a break require an exclusive write lock. 
*/ @@ -5018,6 +5027,38 @@ } + @Override + public RunState getRunState() { + + throw new UnsupportedOperationException(); + + } + + @Override + public IHADigestResponse computeDigest(final IHADigestRequest req) + throws IOException, NoSuchAlgorithmException, DigestException { + + throw new UnsupportedOperationException(); + + } + + @Override + public IHALogDigestResponse computeHALogDigest( + final IHALogDigestRequest req) throws IOException, + NoSuchAlgorithmException, DigestException { + + throw new UnsupportedOperationException(); + + } + + @Override + public Future<Void> globalWriteLock(final IHAGlobalWriteLockRequest req) + throws IOException, TimeoutException, InterruptedException { + + throw new UnsupportedOperationException(); + + } + /** * Return a proxy object for a {@link Future} suitable for use in an RMI * environment (the default implementation returns its argument). Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -29,6 +29,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.security.DigestException; import java.security.MessageDigest; import java.util.concurrent.Future; @@ -151,11 +152,18 @@ Object snapshotAllocators(); /** - * Compute the digest using the snapshot. + * Compute the digest. + * <p> + * Note: The digest is not reliable unless you either use a snapshot or + * suspend writes (on the quorum) while it is computed. * * @param snapshot - * The allocator snapshot. + * The allocator snapshot (optional). When given, the digest is + * computed only for the snapshot. When <code>null</code> it is + * computed for the entire file. 
+ * @throws DigestException */ - void computeDigest(Object snapshot, MessageDigest digest); + void computeDigest(Object snapshot, MessageDigest digest) + throws DigestException, IOException; } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2012-11-06 16:53:42 UTC (rev 6701) @@ -29,6 +29,7 @@ import java.io.InputStream; import java.io.RandomAccessFile; import java.nio.ByteBuffer; +import java.security.DigestException; import java.security.MessageDigest; import java.util.UUID; import java.util.concurrent.Future; @@ -662,9 +663,11 @@ } @Override - public void computeDigest(Object snapshot, MessageDigest digest) { - // TODO Auto-generated method stub - throw new UnsupportedOperationException(); + public void computeDigest(final Object snapshot, final MessageDigest digest) + throws DigestException, IOException { + + m_store.computeDigest(snapshot, digest); + } public ByteBuffer readFromLocalStore(final long addr) Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2012-11-05 14:00:22 UTC (rev 6700) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2012-11... [truncated message content] |
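The following is a minimal sketch of the kind of backup client suggested in the log message above ("a simple Java program could be written to obtain the lock and then yield it when signaled by the completion of the backup software"). It is only an illustration: the HAGlue.globalWriteLock(IHAGlobalWriteLockRequest) method and the HAGlobalWriteLockRequest constructor are taken from the diff in this commit, but the class name, the chosen timeouts, how the leader's HAGlue proxy is discovered, and the runExternalSnapshot() hook are placeholders.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import com.bigdata.ha.HAGlue;
import com.bigdata.ha.msg.HAGlobalWriteLockRequest;
import com.bigdata.ha.msg.IHAGlobalWriteLockRequest;

public class SnapshotBackupSketch {

    /**
     * Suspend writes on the quorum leader, run an external snapshot, and
     * then release the global write lock by cancelling the Future.
     *
     * @param leader
     *            The HAGlue proxy for the quorum leader (discovery of that
     *            proxy is out of scope for this sketch).
     */
    public static void backup(final HAGlue leader) throws Exception {

        // Wait up to 10 seconds to acquire the lock; hold it for at most
        // one hour (both limits are arbitrary for this example).
        final IHAGlobalWriteLockRequest req = new HAGlobalWriteLockRequest(
                10, TimeUnit.SECONDS, // lock wait timeout
                1, TimeUnit.HOURS // lock hold timeout
                );

        // The lock is held (low level writes are suspended) once this
        // method returns. Readers are not blocked.
        final Future<Void> lockFuture = leader.globalWriteLock(req);

        try {

            // Placeholder: run the snapshot / backup mechanism here.
            runExternalSnapshot();

        } finally {

            // Cancel the Future to release the lock; writes then resume.
            lockFuture.cancel(true/* mayInterruptIfRunning */);

        }

    }

    private static void runExternalSnapshot() {
        // Hypothetical hook: signal the backup software and wait until it
        // reports completion.
    }

}

Since the lock only blocks writers, the cost of this procedure is limited to suspended writes for the duration of the snapshot; readers continue to be served by the quorum throughout.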
From: <tho...@us...> - 2012-11-06 17:06:04
|
Revision: 6702 http://bigdata.svn.sourceforge.net/bigdata/?rev=6702&view=rev Author: thompsonbry Date: 2012-11-06 17:05:53 +0000 (Tue, 06 Nov 2012) Log Message: ----------- Removed ServiceId import in favor of ServiceID. Fixes to NSS test suite for ESTCARD. The varargs method was not being correctly invoked (it lacked an explicit cast of the context parameter to either an array or a resource). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2012-11-06 16:53:42 UTC (rev 6701) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2012-11-06 17:05:53 UTC (rev 6702) @@ -61,8 +61,6 @@ import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.data.ACL; -import sun.security.jca.ServiceId; - import com.bigdata.ha.HAGlue; import com.bigdata.ha.RunState; import com.bigdata.jini.start.IServiceListener; @@ -1149,7 +1147,7 @@ private final String TEST_SERVICE_DIR = "test.serviceDir"; /** - * Used to override the {@link ServiceId} in the deployed + * Used to override the {@link ServiceID} in the deployed * configuration. */ private final String TEST_SERVICE_ID = "test.serviceId"; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java 2012-11-06 16:53:42 UTC (rev 6701) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java 2012-11-06 17:05:53 UTC (rev 6702) @@ -551,24 +551,9 @@ doInsertbyURL("POST", packagePath + "test_estcard.ttl"); + + final long rangeCount = m_repo.size(); -// final RangeCountResult rangeCountResult = doRangeCount(// -// requestPath,// -// null,// s -// null,// p -// null,// o -// null // c -// ); -// -// assertEquals(7, rangeCountResult.rangeCount); - -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); - final long rangeCount = m_repo.rangeCount( - null,// s - null,// p - null,// o - null // c - ); assertEquals(7, rangeCount); } @@ -578,23 +563,12 @@ doInsertbyURL("POST", packagePath + "test_estcard.ttl"); -// final RangeCountResult rangeCountResult = doRangeCount(// -// requestPath,// -// new URIImpl("http://www.bigdata.com/Mike"),// s -// null,// p -// null,// o -// null // c -// ); -// -// assertEquals(3, rangeCountResult.rangeCount); - -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); - final long rangeCount = m_repo.rangeCount( - new URIImpl("http://www.bigdata.com/Mike"),// s - null,// p - null,// o - null // c - ); + final long rangeCount = m_repo.rangeCount(new URIImpl( + "http://www.bigdata.com/Mike"),// s + null,// p + null// o + ); + assertEquals(3, rangeCount); } @@ -604,22 +578,11 @@ doInsertbyURL("POST", packagePath + "test_estcard.ttl"); -// final RangeCountResult rangeCountResult = doRangeCount(// -// requestPath,// -// null,// s -// 
RDF.TYPE,// p -// null,// o -// null // c -// ); -// -// assertEquals(3, rangeCountResult.rangeCount); - -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); final long rangeCount = m_repo.rangeCount( null,// s RDF.TYPE,// p - null,// o - null // c + null// o +// null // c ); assertEquals(3, rangeCount); @@ -629,24 +592,14 @@ doInsertbyURL("POST", packagePath + "test_estcard.ttl"); - -// final RangeCountResult rangeCountResult = doRangeCount(// -// requestPath,// -// null,// s -// RDFS.LABEL,// p -// null,// o -// null // c -// ); -// -// assertEquals(2, rangeCountResult.rangeCount); - -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); + final long rangeCount = m_repo.rangeCount( null,// s RDFS.LABEL,// p - null,// o - null // c + null// o +// null // c ); + assertEquals(2, rangeCount); } @@ -655,24 +608,14 @@ doInsertbyURL("POST", packagePath + "test_estcard.ttl"); - -// final RangeCountResult rangeCountResult = doRangeCount(// -// requestPath,// -// null,// s -// null,// p -// new LiteralImpl("Mike"),// o -// null // c -// ); -// -// assertEquals(1, rangeCountResult.rangeCount); - -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); + final long rangeCount = m_repo.rangeCount( null,// s null,// p - new LiteralImpl("Mike"),// o - null // c + new LiteralImpl("Mike")// o + // null // c ); + assertEquals(1, rangeCount); } @@ -681,24 +624,14 @@ doInsertbyURL("POST", packagePath + "test_estcard.ttl"); - -// final RangeCountResult rangeCountResult = doRangeCount(// -// requestPath,// -// new URIImpl("http://www.bigdata.com/Mike"),// s, -// RDF.TYPE,// p -// null,// o -// null // c -// ); -// -// assertEquals(1, rangeCountResult.rangeCount); - -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); + final long rangeCount = m_repo.rangeCount( new URIImpl("http://www.bigdata.com/Mike"),// s, RDF.TYPE,// p - null,// o - null // c + null//,// o +// null // c ); + assertEquals(1, rangeCount); } @@ -714,22 +647,11 @@ doInsertbyURL("POST", packagePath + "test_estcard.trig"); -// final RangeCountResult rangeCountResult = doRangeCount(// -// requestPath,// -// null,// s, -// null,// p -// null,// o -// null // c -// ); -// -// assertEquals(7, rangeCountResult.rangeCount); - -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); final long rangeCount = m_repo.rangeCount( null,// s, null,// p - null,// o - null // c + null// o +// null // c ); assertEquals(7, rangeCount); @@ -743,23 +665,13 @@ doInsertbyURL("POST", packagePath + "test_estcard.trig"); -// final RangeCountResult rangeCountResult = doRangeCount(// -// requestPath,// -// null,// s, -// null,// p -// null,// o -// new URIImpl("http://www.bigdata.com/")// c -// ); -// -// assertEquals(3, rangeCountResult.rangeCount); - -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); final long rangeCount = m_repo.rangeCount( null,// s, null,// p null,// o new URIImpl("http://www.bigdata.com/")// c ); + assertEquals(3, rangeCount); } @@ -772,23 +684,13 @@ doInsertbyURL("POST", packagePath + "test_estcard.trig"); -// final RangeCountResult rangeCountResult = doRangeCount(// -// requestPath,// -// null,// s, -// null,// p -// null,// o -// new URIImpl("http://www.bigdata.com/c1")// c -// ); -// -// assertEquals(2, rangeCountResult.rangeCount); - -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); final long rangeCount = m_repo.rangeCount( null,// s, null,// p null,// o new URIImpl("http://www.bigdata.com/c1")// c ); + assertEquals(2, 
rangeCount); } @@ -800,24 +702,14 @@ doInsertbyURL("POST", packagePath + "test_estcard.trig"); - -// final RangeCountResult rangeCountResult = doRangeCount(// -// requestPath,// -// new URIImpl("http://www.bigdata.com/Mike"),// s, -// null,// p -// null,// o -// new URIImpl("http://www.bigdata.com/c1")// c -// ); -// -// assertEquals(1, rangeCountResult.rangeCount); -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); final long rangeCount = m_repo.rangeCount( new URIImpl("http://www.bigdata.com/Mike"),// s, null,// p null,// o new URIImpl("http://www.bigdata.com/c1")// c ); + assertEquals(1, rangeCount); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
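To make the varargs problem behind the ESTCARD fixes above concrete, here is a stand-alone illustration (generic Java, not bigdata code; the class and method names are invented). With a trailing varargs parameter, a bare null argument is passed as a null array rather than as an array containing a single null element, which is why the updated test calls either drop the trailing context argument or pass an explicitly typed value.

public class VarargsNullDemo {

    // Stand-in for a method such as rangeCount(s, p, o, c...) whose last
    // parameter is declared as a varargs array.
    static int describe(final Object... contexts) {

        if (contexts == null) {
            // Reached when the caller supplies a null *array*.
            return -1;
        }

        // Otherwise, the length of the materialized varargs array.
        return contexts.length;

    }

    public static void main(final String[] args) {

        System.out.println(describe());                 // 0  : empty varargs array
        System.out.println(describe((Object[]) null));  // -1 : the array itself is null
        System.out.println(describe((Object) null));    // 1  : one-element array {null}

        // describe(null) also compiles, but only with a compiler warning,
        // and it behaves like describe((Object[]) null) -- hence the need
        // for an explicit cast to either an array or a single element.

    }

}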
From: <tho...@us...> - 2012-11-07 13:29:32
|
Revision: 6704 http://bigdata.svn.sourceforge.net/bigdata/?rev=6704&view=rev Author: thompsonbry Date: 2012-11-07 13:29:20 +0000 (Wed, 07 Nov 2012) Log Message: ----------- Reduced stdout logging in BigdataStatics (100=>30). Bug fixes to TestHA2JournalServer (failure to get() a Future). Some code cleanup around tear down in the text fixture (fixes occasional NPE). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/BigdataStatics.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/BigdataStatics.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2012-11-07 11:47:47 UTC (rev 6703) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2012-11-07 13:29:20 UTC (rev 6704) @@ -58,7 +58,7 @@ * * @see ProcessHelper */ - public static int echoProcessStartupLineCount = 100;//Integer.MAX_VALUE;//100 + public static int echoProcessStartupLineCount = 30;//Integer.MAX_VALUE;//100 /** * Global switch controlling whether true thread local buffers or striped Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java 2012-11-07 11:47:47 UTC (rev 6703) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java 2012-11-07 13:29:20 UTC (rev 6704) @@ -63,6 +63,7 @@ import com.bigdata.quorum.QuorumException; import com.bigdata.quorum.QuorumMember; import com.bigdata.quorum.QuorumWatcher; +import com.bigdata.util.InnerCause; import com.bigdata.util.concurrent.DaemonThreadFactory; import com.bigdata.zookeeper.ZooKeeperAccessor; @@ -1393,16 +1394,21 @@ handleExpired(); } catch (KeeperException e1) { log.error(e, e1); - } catch (InterruptedException e1) { - /* - * Note: This exception probably only occurs through - * the shutdown of the ZKQuorumWatcher, which will - * shutdown the service handling the event. - */ - if (log.isInfoEnabled()) - log.info(e1); +// } catch (InterruptedException e1) { +// if (log.isInfoEnabled()) +// log.info(e1); } catch (Throwable e1) { - log.error(e, e1); + if (InnerCause.isInnerCause(e1, InterruptedException.class)) { + /* + * Note: This exception probably only occurs through the + * shutdown of the ZKQuorumWatcher, which will shutdown + * the service handling the event. 
+ */ + if (log.isInfoEnabled()) + log.info(e1); + } else { + log.error(e, e1); + } } } } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2012-11-07 11:47:47 UTC (rev 6703) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2012-11-07 13:29:20 UTC (rev 6704) @@ -157,6 +157,11 @@ if (processHelper != this.processHelper) throw new AssertionError(); + /* + * Note: Do not clear the [processHelper] field. + */ + + // Mark the process as known dead. dead = true; } @@ -524,36 +529,46 @@ }); try { - + assertCondition(new Runnable() { public void run() { // Wait for the process death. assertTrue(serviceListener.isDead()); } - }); + }, 10/* timeout */, TimeUnit.SECONDS); - } catch (AssertionFailedError err) { + } catch (junit.framework.AssertionFailedError err) { /* * If we do not observe a normal process death, then attempt to kill * the child process. */ - log.error("Forcing kill of child process."); - try { - serviceListener.getProcessHelper() - .kill(true/* immediateShutdown */); - + final ProcessHelper processHelper = serviceListener + .getProcessHelper(); + + if (processHelper != null) { + + log.error("Forcing kill of child process."); + + processHelper.kill(true/* immediateShutdown */); + + } else { + + log.error("Child process not correctly terminated."); + + } + } catch (InterruptedException e) { // Ignore. } - throw err; - + fail("Process did not die by itself: " + haGlue, err); + } } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java 2012-11-07 11:47:47 UTC (rev 6703) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java 2012-11-07 13:29:20 UTC (rev 6704) @@ -159,11 +159,11 @@ if (leader == serverA) { - serverB.bounceZookeeperConnection(); + serverB.bounceZookeeperConnection().get(); } else { - serverA.bounceZookeeperConnection(); + serverA.bounceZookeeperConnection().get(); } @@ -231,13 +231,13 @@ final HAGlue leader = quorum.getClient().getLeader(token1); - leader.bounceZookeeperConnection(); + leader.bounceZookeeperConnection().get(); final long token2 = quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); /* - * Bouncing the connection broke the quorun, so verify that the + * Bouncing the connection broke the quorum, so verify that the * quorum token was advanced. */ assertEquals(token1 + 1, token2); @@ -278,7 +278,8 @@ HAGlue serverA = startA(); HAGlue serverB = startB(); - final long token1 = quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + final long token1 = quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); doNSSStatusRequest(serverA); doNSSStatusRequest(serverB); @@ -325,7 +326,11 @@ // The leader should not have changed. final HAGlue leader2 = quorum.getClient().getLeader(token2); - assertTrue(leader == leader2); + if (leader != leader2) { + + fail("Expected leader=" + leader + ", but was " + leader2); + + } /* * Verify we can read on the KB on both nodes. 
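Two recurring patterns in the fixes above are worth spelling out: a remote method that returns a Future must have get() called on the result so that failures of the remote operation actually surface in the test, and an interrupt that arrives wrapped inside another exception is detected with InnerCause rather than with a dedicated catch clause. A condensed sketch of both, assuming bounceZookeeperConnection() returns a Future as implied by the .get() calls in the diff:

{{{
import org.apache.log4j.Logger;

import com.bigdata.ha.HAGlue;
import com.bigdata.util.InnerCause;

public class HATestPatterns {

    private static final Logger log = Logger.getLogger(HATestPatterns.class);

    /**
     * Bounce the zookeeper connection of a service and wait for the remote
     * operation to complete. Dropping the returned Future (the original bug)
     * silently discards any failure of the remote operation.
     */
    static void bounceAndWait(final HAGlue service) throws Exception {

        service.bounceZookeeperConnection().get();

    }

    /**
     * Error policy from the ZKQuorumImpl event handler: a (possibly wrapped)
     * InterruptedException indicates normal shutdown and is logged at INFO,
     * while anything else is logged as an error.
     */
    static void handleEventError(final Throwable t) {

        if (InnerCause.isInnerCause(t, InterruptedException.class)) {

            if (log.isInfoEnabled())
                log.info(t);

        } else {

            log.error(t, t);

        }

    }

}
}}}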
From: <tho...@us...> - 2012-11-08 14:58:42
|
Revision: 6707 http://bigdata.svn.sourceforge.net/bigdata/?rev=6707&view=rev Author: thompsonbry Date: 2012-11-08 14:58:30 +0000 (Thu, 08 Nov 2012) Log Message: ----------- The root cause is the pre-registration of the triple store in the global row store before the axioms and vocabulary objects have been initialized. Thus, a concurrent attempt to locate the triple store can succeed and return the row store properties without the axioms or vocabulary objects. This issue is clearly demonstrated by TestConcurrentKBCreate. The root cause was the GRS was being written in super.create(). It was then being updated after the lexicon and spo relations were initialized. So, a discovery would succeed since it was already in the GRS but the vocabulary and axioms had not yet been initialized. I fixed this by passing in the container to the LexiconRelation and SPORelation constructors and deferring the GRS writes until after everything was fully initialized. I also move the code to publish the reference into the default resource locator cache until after the commit() in create(). TestLocalTripleStore, the AST Evaluation test suite, TestConcurrentKBCreate(), and TestBigdataSailWithQuads are all green with this change. @see https://sourceforge.net/apps/trac/bigdata/ticket/617 (Concurrent KB create fails with "No axioms defined?") Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/AbstractRelation.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/AbstractResource.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPORelation.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestConcurrentKBCreate.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreUtil.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/AbstractRelation.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/AbstractRelation.java 2012-11-08 13:07:31 UTC (rev 6706) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/AbstractRelation.java 2012-11-08 14:58:30 UTC (rev 6707) @@ -47,6 +47,7 @@ import com.bigdata.journal.TemporaryStore; import com.bigdata.relation.accesspath.AccessPath; import com.bigdata.relation.accesspath.IAccessPath; +import com.bigdata.relation.locator.ILocatableResource; import com.bigdata.relation.rule.IAccessPathExpander; import com.bigdata.service.IBigdataFederation; import com.bigdata.striterator.IKeyOrder; @@ -69,11 +70,25 @@ final String namespace, final Long timestamp, final Properties properties) { - super(indexManager, namespace, timestamp, properties); + this(null/* container */, indexManager, namespace, timestamp, + properties); } /** + * Alternative version used when a resource exists within some container. + * The additional <i>container</i> argument provides access to the container + * before the container has been written to the global row store. 
+ */ + protected AbstractRelation(final ILocatableResource container, + final IIndexManager indexManager, final String namespace, + final Long timestamp, final Properties properties) { + + super(container, indexManager, namespace, timestamp, properties); + + } + + /** * The fully qualified name of the index. * * @param keyOrder Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/AbstractResource.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/AbstractResource.java 2012-11-08 13:07:31 UTC (rev 6706) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/AbstractResource.java 2012-11-08 14:58:30 UTC (rev 6707) @@ -28,8 +28,6 @@ package com.bigdata.relation; -import java.util.Enumeration; -import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Properties; @@ -69,6 +67,7 @@ import com.bigdata.relation.rule.eval.ISolution; import com.bigdata.relation.rule.eval.ProgramTask; import com.bigdata.service.IBigdataFederation; +import com.bigdata.sparse.GlobalRowStoreUtil; /** * Base class for locatable resources. @@ -369,13 +368,26 @@ } - /** - * - */ protected AbstractResource(final IIndexManager indexManager, final String namespace, final Long timestamp, final Properties properties) { + this(null/* container */, indexManager, namespace, timestamp, + properties); + + } + + /** + * Alternative version used when a resource exists within some container. + * The additional <i>container</i> argument provides access to the container + * before the container has been written to the global row store. + */ + protected AbstractResource(final ILocatableResource container, + final IIndexManager indexManager, final String namespace, + final Long timestamp, final Properties properties) { + + // Note: [container] MAY be null. + if (indexManager == null) throw new IllegalArgumentException(); @@ -388,6 +400,9 @@ if (properties == null) throw new IllegalArgumentException(); + // Note: Non-null if this resource exists in some container + this.container = container; + // Note: Bound before we lookup property values! this.indexManager = indexManager; @@ -640,30 +655,7 @@ /* * Convert the Properties to a Map. */ - final Map<String, Object> map = new HashMap<String, Object>(); - { - - final Enumeration<? extends Object> e = properties.propertyNames(); - - while (e.hasMoreElements()) { - - final Object key = e.nextElement(); - -// if (!(key instanceof String)) { -// -// log.warn("Will not store non-String key: " + key); -// -// continue; -// -// } - - final String name = (String) key; - - map.put(name, properties.getProperty(name)); - - } - - } + final Map<String, Object> map = GlobalRowStoreUtil.convert(properties); // Write the map on the row store. final Map<String, Object> afterMap = indexManager.getGlobalRowStore() @@ -679,7 +671,7 @@ * Add this instance to the locator cache. * * Note: Normally, the instances are created by the locator cache - * itself. In general the only the the application creates an instance + * itself. In general the only time the application creates an instance * directly is when it is going to attempt to create the relation. This * takes advantage of that pattern to notify the locator that it should * cache this instance. 
Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreUtil.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreUtil.java 2012-11-08 14:58:30 UTC (rev 6707) @@ -0,0 +1,70 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +package com.bigdata.sparse; + +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +/** + * Utility method for use with a {@link SparseRowStore}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class GlobalRowStoreUtil { + + /** + * Convert the Properties to a Map. + */ + public static Map<String, Object> convert(final Properties properties) { + + final Map<String, Object> map = new HashMap<String, Object>(); + + final Enumeration<? extends Object> e = properties.propertyNames(); + + while (e.hasMoreElements()) { + + final Object key = e.nextElement(); + +// if (!(key instanceof String)) { +// +// log.warn("Will not store non-String key: " + key); +// +// continue; +// +// } + + final String name = (String) key; + + map.put(name, properties.getProperty(name)); + + } + + return map; + + } + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2012-11-08 13:07:31 UTC (rev 6706) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2012-11-08 14:58:30 UTC (rev 6707) @@ -287,10 +287,19 @@ final String namespace, final Long timestamp, final Properties properties) { - super(indexManager, namespace, timestamp, properties); + this(null/* container */, indexManager, namespace, timestamp, + properties); + } + + public LexiconRelation(final AbstractTripleStore container, + final IIndexManager indexManager, final String namespace, + final Long timestamp, final Properties properties) { + + super(container, indexManager, namespace, timestamp, properties); + { - + this.textIndex = Boolean.parseBoolean(getProperty( AbstractTripleStore.Options.TEXT_INDEX, AbstractTripleStore.Options.DEFAULT_TEXT_INDEX)); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPORelation.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPORelation.java 2012-11-08 13:07:31 UTC (rev 6706) +++ 
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPORelation.java 2012-11-08 14:58:30 UTC (rev 6707) @@ -229,7 +229,16 @@ final String namespace, final Long timestamp, final Properties properties) { - super(indexManager, namespace, timestamp, properties); + this(null/* container */, indexManager, namespace, timestamp, + properties); + + } + + public SPORelation(final AbstractTripleStore container, + final IIndexManager indexManager, final String namespace, + final Long timestamp, final Properties properties) { + + super(container, indexManager, namespace, timestamp, properties); /* * Reads off the property for the inference engine that tells us whether Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2012-11-08 13:07:31 UTC (rev 6706) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2012-11-08 14:58:30 UTC (rev 6707) @@ -32,7 +32,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; @@ -163,6 +162,7 @@ import com.bigdata.relation.rule.eval.ISolution; import com.bigdata.search.FullTextIndex; import com.bigdata.service.IBigdataFederation; +import com.bigdata.sparse.GlobalRowStoreUtil; import com.bigdata.striterator.ChunkedArrayIterator; import com.bigdata.striterator.ChunkedConvertingIterator; import com.bigdata.striterator.DelegateChunkedIterator; @@ -172,6 +172,7 @@ import com.bigdata.striterator.ICloseableIterator; import com.bigdata.striterator.IKeyOrder; import com.bigdata.util.InnerCause; +import com.bigdata.util.PropertyUtil; /** * Abstract base class that implements logic for the {@link ITripleStore} @@ -1523,33 +1524,47 @@ @Override public void create() { + if (log.isInfoEnabled()) + log.info(toString()); + assertWritable(); - - final Properties tmp = getProperties(); - - // set property that will let the contained relations locate their container. - tmp.setProperty(RelationSchema.CONTAINER, getNamespace()); + + final IResourceLock resourceLock = acquireExclusiveLock(); - if (Boolean.valueOf(tmp.getProperty(Options.TEXT_INDEX, - Options.DEFAULT_TEXT_INDEX))) { + try { - /* - * If the text index is enabled for a new kb instance, then disable - * the fieldId component of the full text index key since it is not - * used by the RDF database and will just waste space in the index. + final Properties tmp = PropertyUtil.flatCopy(getProperties()); + + // set property that will let the contained relations locate their container. + tmp.setProperty(RelationSchema.CONTAINER, getNamespace()); + + if (Boolean.valueOf(tmp.getProperty(Options.TEXT_INDEX, + Options.DEFAULT_TEXT_INDEX))) { + + /* + * If the text index is enabled for a new kb instance, then disable + * the fieldId component of the full text index key since it is not + * used by the RDF database and will just waste space in the index. + * + * Note: Also see below where this is set on the global row store. + */ + tmp.setProperty(FullTextIndex.Options.FIELDS_ENABLED, "false"); + + } + + /** + * We must not write the properties onto the global row store until + * they have been fully initialized. * - * Note: Also see below where this is set on the global row store. 
+ * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/617"> + * Concurrent KB create fails with "No axioms defined?" </a> */ - tmp.setProperty(FullTextIndex.Options.FIELDS_ENABLED, "false"); - - } - - final IResourceLock resourceLock = acquireExclusiveLock(); +// super.create(); - try { + final String SPO_NAMESPACE = getNamespace() + "." + + SPORelation.NAME_SPO_RELATION; - super.create(); - final String LEXICON_NAMESPACE = lexicon ? getNamespace() + "." + LexiconRelation.NAME_LEXICON_RELATION : null; @@ -1581,9 +1596,10 @@ } - lexiconRelation = new LexiconRelation(getIndexManager(), - LEXICON_NAMESPACE, - getTimestamp(), tmp); + lexiconRelation = new LexiconRelation(this/* container */, + getIndexManager(), LEXICON_NAMESPACE, getTimestamp(), + new Properties(tmp)// Note: Must wrap properties! + ); lexiconRelation.create();//assignedSplits); @@ -1591,8 +1607,10 @@ } - spoRelation = new SPORelation(getIndexManager(), getNamespace() - + "." + SPORelation.NAME_SPO_RELATION, getTimestamp(), tmp); + spoRelation = new SPORelation(this/* container */, + getIndexManager(), SPO_NAMESPACE, getTimestamp(), + new Properties(tmp)// Note: must wrap properties! + ); spoRelation.create();//assignedSplits); @@ -1628,52 +1646,90 @@ } + } + + /** + * Write on the global row store. We atomically set all + * properties, including the axioms and the vocabulary objects. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/617"> + * Concurrent KB create fails with "No axioms defined?" + * </a> + */ + { + /* - * Update the global row store to set the axioms and the - * vocabulary objects. + * Convert the Properties to a Map. */ - { + final Map<String, Object> map = GlobalRowStoreUtil.convert(tmp); - final Map<String, Object> map = new HashMap<String, Object>(); + // primary key. + map.put(RelationSchema.NAMESPACE, getNamespace()); - // primary key. - map.put(RelationSchema.NAMESPACE, getNamespace()); - + if (axioms != null) { // axioms. map.put(TripleStoreSchema.AXIOMS, axioms); -// setProperty(TripleStoreSchema.AXIOMS,axioms); + // setProperty(TripleStoreSchema.AXIOMS,axioms); + } + if (vocabRef.get() != null) { // vocabulary. map.put(TripleStoreSchema.VOCABULARY, vocabRef.get()); -// setProperty(TripleStoreSchema.VOCABULARY,vocab); + // setProperty(TripleStoreSchema.VOCABULARY,vocab); + } - if (lexiconRelation.isTextIndex()) { - /* - * Per the logic and commentary at the top of create(), - * disable this option on the global row store. - */ - map.put(FullTextIndex.Options.FIELDS_ENABLED, "false"); - } + /* + * Note: This will now be false automatically since the [map] is + * based on the Properties object [tmp] and we have already set + * this property to [false] in tmp. + */ +// if (lexiconRelation.isTextIndex()) { +// /* +// * Per the logic and commentary at the top of create(), +// * disable this option on the global row store. +// */ +// map.put(FullTextIndex.Options.FIELDS_ENABLED, "false"); +// } + + // Write the map on the row store. + final Map<String, Object> afterMap = getIndexManager() + .getGlobalRowStore() + .write(RelationSchema.INSTANCE, map); + + if(log.isDebugEnabled()) { - // Write the map on the row store. - getIndexManager().getGlobalRowStore().write( - RelationSchema.INSTANCE, map); - + log.debug("Properties after write: " + afterMap); + } + /* + * Note: A commit is required in order for a read-committed view + * to have access to the registered indices. + * + * @todo have the caller do this? 
It does not really belong here + * since you can not make a large operation atomic if you do a + * commit here. + */ + + commit(); + + /* + * Add this instance to the locator cache, but NOT before we + * have committed the changes to the global row store. + * + * Note: Normally, the instances are created by the locator + * cache itself. In general the only time the application + * creates an instance directly is when it is going to attempt + * to create the relation. This takes advantage of that pattern + * to notify the locator that it should cache this instance. + */ + + ((DefaultResourceLocator) getIndexManager() + .getResourceLocator()).putInstance(this); + } - - /* - * Note: A commit is required in order for a read-committed view to - * have access to the registered indices. - * - * @todo have the caller do this? It does not really belong here - * since you can not make a large operation atomic if you do a - * commit here. - */ - commit(); - } catch (Throwable t) { if (!InnerCause.isInnerCause(t, InterruptedException.class)) { log.error(t, t); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestConcurrentKBCreate.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestConcurrentKBCreate.java 2012-11-08 13:07:31 UTC (rev 6706) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestConcurrentKBCreate.java 2012-11-08 14:58:30 UTC (rev 6707) @@ -96,6 +96,8 @@ * INFO : 41212 2012-11-06 08:38:41,875 : ... 23 more * </pre> * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/617"> + * Concurrent KB create fails with "No axioms defined?" </a> * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ public class TestConcurrentKBCreate extends ProxyBigdataSailTestCase { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
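The fix boils down to an ordering discipline in create(): fully initialize the axioms and vocabulary, write the complete property set on the global row store, commit, and only then publish the instance into the locator cache. The same hazard can be shown in miniature, independent of the bigdata classes; the sketch below is only an illustration of the race, not bigdata code:

{{{
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
 * Illustration of the race behind ticket #617: publishing a resource into a
 * shared registry before it is fully initialized lets a concurrent reader
 * observe the half-built state ("No axioms defined?"). The fix is to finish
 * initialization first and publish last.
 */
public class PublishAfterInitExample {

    static final class KB {
        volatile Object axioms; // null until initialized.
    }

    static final ConcurrentMap<String, KB> registry = new ConcurrentHashMap<String, KB>();

    /** Broken ordering: publish, then initialize (readers may see axioms == null). */
    static void createBroken(final String namespace) {
        final KB kb = new KB();
        registry.put(namespace, kb); // concurrent lookups can already find kb ...
        kb.axioms = new Object();    // ... before the axioms exist.
    }

    /** Fixed ordering: initialize fully, then publish. */
    static void createFixed(final String namespace) {
        final KB kb = new KB();
        kb.axioms = new Object();    // fully initialize first.
        registry.put(namespace, kb); // publish last.
    }

}
}}}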
From: <tho...@us...> - 2012-11-09 13:22:59
|
Revision: 6714 http://bigdata.svn.sourceforge.net/bigdata/?rev=6714&view=rev Author: thompsonbry Date: 2012-11-09 13:22:53 +0000 (Fri, 09 Nov 2012) Log Message: ----------- I checked the code for all invocations of {{{ final RDFFormat format = RDFFormat.forMIMEType(contentType); }}} I identified several other locations where the same problem exists: - UpdateServlet - DeleteServlet - InsertServlet - AST2BOpUpdate (SPARQL LOAD operation). All locations have been fixed. @see https://sourceforge.net/apps/trac/bigdata/ticket/620 (UpdateServlet fails to parse MIMEType when doing conneg.) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java 2012-11-08 19:49:21 UTC (rev 6713) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java 2012-11-09 13:22:53 UTC (rev 6714) @@ -41,7 +41,6 @@ import java.util.Map; import java.util.Set; import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.zip.GZIPInputStream; @@ -97,6 +96,7 @@ import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.SPARQLUpdateEvent; import com.bigdata.rdf.sail.Sesame2BigdataIterator; +import com.bigdata.rdf.sail.webapp.MiniMime; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.AbstractGraphDataUpdate; import com.bigdata.rdf.sparql.ast.AddGraph; @@ -1393,8 +1393,14 @@ // The file path. final String n = sourceURL.getFile(); - // Attempt to obtain the format from the Content-Type. - RDFFormat format = RDFFormat.forMIMEType(contentType); + /** + * Attempt to obtain the format from the Content-Type. + * + * <a href="https://sourceforge.net/apps/trac/bigdata/ticket/620"> + * UpdateServlet fails to parse MIMEType when doing conneg. </a> + */ + RDFFormat format = RDFFormat.forMIMEType(new MiniMime(contentType) + .getMimeType()); if (format == null) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2012-11-08 19:49:21 UTC (rev 6713) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2012-11-09 13:22:53 UTC (rev 6714) @@ -281,11 +281,15 @@ try { - /* + /** * There is a request body, so let's try and parse it. + * + * <a href="https://sourceforge.net/apps/trac/bigdata/ticket/620"> + * UpdateServlet fails to parse MIMEType when doing conneg. 
</a> */ - final RDFFormat format = RDFFormat.forMIMEType(contentType); + final RDFFormat format = RDFFormat.forMIMEType(new MiniMime( + contentType).getMimeType()); if (format == null) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2012-11-08 19:49:21 UTC (rev 6713) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2012-11-09 13:22:53 UTC (rev 6714) @@ -143,8 +143,14 @@ if (log.isInfoEnabled()) log.info("Request body: " + contentType); - final RDFFormat format = RDFFormat.forMIMEType(contentType); + /** + * <a href="https://sourceforge.net/apps/trac/bigdata/ticket/620"> + * UpdateServlet fails to parse MIMEType when doing conneg. </a> + */ + final RDFFormat format = RDFFormat + .forMIMEType(new MiniMime(contentType).getMimeType()); + if (format == null) { buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, @@ -389,26 +395,40 @@ hconn.setDoOutput(false); hconn.setReadTimeout(0);// no timeout? http param? - /* + /** * There is a request body, so let's try and parse it. + * + * <a href= + * "https://sourceforge.net/apps/trac/bigdata/ticket/620" + * > UpdateServlet fails to parse MIMEType when doing + * conneg. </a> */ final String contentType = hconn.getContentType(); + + RDFFormat format = RDFFormat.forMIMEType(new MiniMime( + contentType).getMimeType()); - RDFFormat format = RDFFormat.forMIMEType(contentType); - - if(format == null) { - // Try to get the RDFFormat from the URL's file path. + if (format == null) { + + /* + * Try to get the RDFFormat from the URL's file + * path. + */ + format = RDFFormat.forFileName(url.getFile()); + } - + if (format == null) { - buildResponse(resp, HTTP_BADREQUEST, + + buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, "Content-Type not recognized as RDF: " + contentType); - - return; + + return; + } final RDFParserFactory rdfParserFactory = RDFParserRegistry Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java 2012-11-08 19:49:21 UTC (rev 6713) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java 2012-11-09 13:22:53 UTC (rev 6714) @@ -129,8 +129,14 @@ if (log.isInfoEnabled()) log.info("Request body: " + contentType); - final RDFFormat requestBodyFormat = RDFFormat.forMIMEType(contentType); + /** + * <a href="https://sourceforge.net/apps/trac/bigdata/ticket/620"> + * UpdateServlet fails to parse MIMEType when doing conneg. 
</a> + */ + final RDFFormat requestBodyFormat = RDFFormat.forMIMEType(new MiniMime( + contentType).getMimeType()); + if (requestBodyFormat == null) { buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, @@ -453,40 +459,42 @@ try { conn = getBigdataRDFContext() - .getUnisolatedConnection(namespace); - - if (remove != null) { - - final String contentType = remove.getContentType(); - - final InputStream is = remove.getInputStream(); - - final RDFHandler handler = new RemoveStatementHandler( - conn.getSailConnection(), nmodified, defaultContextDelete); - - processData(conn, contentType, is, handler, baseURI); - - } - - if (add != null) { - - final String contentType = add.getContentType(); - - final InputStream is = add.getInputStream(); - - final RDFHandler handler = new AddStatementHandler( - conn.getSailConnection(), nmodified, defaultContextInsert); - - processData(conn, contentType, is, handler, baseURI); - - } - - conn.commit(); + .getUnisolatedConnection(namespace); - final long elapsed = System.currentTimeMillis() - begin; - + if (remove != null) { + + final String contentType = remove.getContentType(); + + final InputStream is = remove.getInputStream(); + + final RDFHandler handler = new RemoveStatementHandler( + conn.getSailConnection(), nmodified, + defaultContextDelete); + + processData(conn, contentType, is, handler, baseURI); + + } + + if (add != null) { + + final String contentType = add.getContentType(); + + final InputStream is = add.getInputStream(); + + final RDFHandler handler = new AddStatementHandler( + conn.getSailConnection(), nmodified, + defaultContextInsert); + + processData(conn, contentType, is, handler, baseURI); + + } + + conn.commit(); + + final long elapsed = System.currentTimeMillis() - begin; + reportModifiedCount(resp, nmodified.get(), elapsed); - + } catch (Throwable t) { if (conn != null) @@ -517,8 +525,16 @@ final String baseURI) throws Exception { - final RDFFormat format = RDFFormat.forMIMEType(contentType); - + /** + * Note: The request was already validated. + * + * <a href="https://sourceforge.net/apps/trac/bigdata/ticket/620"> + * UpdateServlet fails to parse MIMEType when doing conneg. </a> + */ + + final RDFFormat format = RDFFormat + .forMIMEType(new MiniMime(contentType).getMimeType()); + final RDFParserFactory rdfParserFactory = RDFParserRegistry .getInstance().get(format); @@ -559,8 +575,9 @@ } - final RDFFormat format = RDFFormat.forMIMEType(contentType); - + final RDFFormat format = RDFFormat + .forMIMEType(new MiniMime(contentType).getMimeType()); + if (format == null) { buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
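The underlying problem is that RDFFormat.forMIMEType() does an exact match against the registered MIME types, so a Content-Type value carrying parameters (e.g. "text/turtle;charset=UTF-8") resolves to null and the request is rejected as unrecognized RDF. The patch routes every lookup through MiniMime to strip those parameters first. A stand-alone sketch of the same idea, using a simple stand-in for MiniMime:

{{{
import org.openrdf.rio.RDFFormat;

public class ConnegSketch {

    /**
     * Keep only the type/subtype portion of a Content-Type header before
     * asking Sesame for the RDFFormat (the role MiniMime.getMimeType() plays
     * in the patch above).
     */
    public static RDFFormat formatForContentType(final String contentType) {

        if (contentType == null)
            return null;

        final String mimeType = contentType.split(";")[0].trim();

        return RDFFormat.forMIMEType(mimeType);

    }

    public static void main(final String[] args) {

        // The parameter defeats the exact match, so this resolves to null.
        System.out.println(RDFFormat.forMIMEType("text/turtle;charset=UTF-8"));

        // Stripping the parameter first resolves to the Turtle format.
        System.out.println(formatForContentType("text/turtle;charset=UTF-8"));

    }

}
}}}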
From: <tho...@us...> - 2012-11-26 16:28:08
|
Revision: 6718 http://bigdata.svn.sourceforge.net/bigdata/?rev=6718&view=rev Author: thompsonbry Date: 2012-11-26 16:27:54 +0000 (Mon, 26 Nov 2012) Log Message: ----------- Commit includes refactoring to support coalescing records in the write cache service for the RW mode Journal. This behavior is parameterized. You can specify the desired maximum size of the dirty list, and thus implicitly specify the minimum #of buffers to be retained on the clean list. This effects when eviction occurs. You can also disable compaction by setting the compactionThreshold to 100 percent. The default number of write cache buffers (6) is unchanged in this commit, but it can now be increased substantially with good effect so long as you also increase the maximum amount of direct memory that the JVM is willing to allocate. All tests for the write cache service and the RWJournal are green. Tests for the HA mode are unchanged. AST level tests are green. Committing now for CI. @see https://sourceforge.net/apps/trac/bigdata/ticket/621 (Coalesce records in write cache) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/Options.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWriteCacheServiceLifetime.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2012-11-09 16:09:37 UTC (rev 6717) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2012-11-26 16:27:54 UTC (rev 6718) @@ -1,2442 +1,2551 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -/* - * Created on Feb 10, 2010 - */ - -package com.bigdata.io.writecache; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import org.apache.log4j.Logger; - -import com.bigdata.btree.IndexSegmentBuilder; -import com.bigdata.counters.CAT; -import com.bigdata.counters.CounterSet; -import com.bigdata.counters.Instrument; -import com.bigdata.ha.msg.HAWriteMessage; -import com.bigdata.ha.msg.IHAWriteMessage; -import com.bigdata.io.DirectBufferPool; -import com.bigdata.io.FileChannelUtility; -import com.bigdata.io.IBufferAccess; -import com.bigdata.io.IReopenChannel; -import com.bigdata.journal.AbstractBufferStrategy; -import com.bigdata.journal.IRootBlockView; -import com.bigdata.journal.StoreTypeEnum; -import com.bigdata.journal.WORMStrategy; -import com.bigdata.rawstore.Bytes; -import com.bigdata.rawstore.IRawStore; -import com.bigdata.rwstore.RWStore; -import com.bigdata.util.ChecksumError; -import com.bigdata.util.ChecksumUtility; -import com.bigdata.util.concurrent.Memoizer; - -/** - * This class provides a write cache with read-through for NIO writes on a - * {@link FileChannel} (and potentially on a remote service). This class is - * designed to maximize the opportunity for efficient NIO by combining many - * writes onto a single direct {@link ByteBuffer} and then efficiently - * transferring those writes onto the backing channel in a channel dependent - * manner. In general, there are three use cases for a {@link WriteCache}: - * <ol> - * <li>Gathered writes. This case is used by the {@link RWStore}.</li> - * <li>Pure append of sequentially allocated records. This case is used by the - * {@link WORMStrategy} (WORM) and by the {@link IndexSegmentBuilder}.</li> - * <li>Write of a single large buffer owned by the caller. This case may be used - * when the caller wants to manage the buffers or when the caller's buffer is - * larger than the write cache.</li> - * </ol> - * The caller is responsible for managing which buffers are being written on and - * read on, when they are flushed, and when they are reset. It is perfectly - * reasonable to have more than one {@link WriteCache} and to read through on - * any {@link WriteCache} until it has been recycled. A {@link WriteCache} must - * be reset before it is put into play again for new writes. - * <p> - * Note: For an append-only model (WORM), the caller MUST serialize writes onto - * the {@link IRawStore} and the {@link WriteCache}. This is required in order - * to ensure that the records are laid out in a dense linear fashion on the - * {@link WriteCache} and permits the backing buffer to be transferred in a - * single IO to the backing file. 
- * <p> - * Note: For a {@link RWStore}, the caller must take more responsibility for - * managing the {@link WriteCache}(s) which are in play and scheduling their - * eviction onto the backing store. The caller can track the space remaining in - * each {@link WriteCache} and decide when to flush a {@link WriteCache} based - * on that information. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -abstract public class WriteCache implements IWriteCache { - - protected static final Logger log = Logger.getLogger(WriteCache.class); - - /** - * <code>true</code> iff per-record checksums are being maintained. - */ - private final boolean useChecksum; - - /** - * <code>true</code> iff per-record checksums are being maintained. - */ - private final boolean prefixWrites; - - /** - * The size of the header for a prefix write. - */ - static final int SIZEOF_PREFIX_WRITE_METADATA = 8/* offset */+ 4/* size */+ 4/* latchedAddr */; - - /** - * The buffer used to absorb writes that are destined for some channel. - * <p> - * Note: This is an {@link AtomicReference} since we want to clear this - * field in {@link #close()}. - */ - final private AtomicReference<IBufferAccess> buf; - - /** - * The read lock allows concurrent {@link #acquire()}s and permits both - * reads and writes on the acquired buffer, while the write lock prevents - * {@link #acquire()} during critical sections such as - * {@link #flush(boolean, long, TimeUnit)}, {@link #reset()}, and - * {@link #close()}. - */ - final private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - - /** - * Return the backing {@link ByteBuffer}. The caller may read or write on - * the buffer, but MUST NOT have a side effect on the - * {@link ByteBuffer#position()} without first synchronizing on the - * {@link ByteBuffer}. Once they are done, the caller MUST call - * {@link #release()}. - * <p> - * Note: This uses the read lock to allow concurrent read/write operations - * on the backing buffer. - * <p> - * Note: <strong>At most one write operation may execute concurrently in - * order to avoid side effects on the buffers position when copying data - * onto the buffer. This constraint must be imposed by the caller using a - * <code>synchronized(buf){}</code> block during the critical sections where - * the buffer position will be updated by a write. </strong> - * - * @return The {@link ByteBuffer}. - * - * @throws InterruptedException - * @throws IllegalStateException - * if the {@link WriteCache} is closed. - */ - private ByteBuffer acquire() throws InterruptedException, IllegalStateException { - - final Lock readLock = lock.readLock(); - - readLock.lockInterruptibly(); - - try { - - // latch.inc(); - - final IBufferAccess tmp = buf.get(); - - if (tmp == null) { - - // latch.dec(); - - throw new IllegalStateException(); - - } - - // Note: The ReadLock is still held! - return tmp.buffer(); - - } catch (Throwable t) { - - // Release the lock only on the error path. - readLock.unlock(); - - if (t instanceof InterruptedException) - throw (InterruptedException) t; - - if (t instanceof IllegalStateException) - throw (IllegalStateException) t; - - throw new RuntimeException(t); - - } - - } - - /** - * Release the read lock on an acquired {@link ByteBuffer}. - */ - private void release() { - - lock.readLock().unlock(); - - // latch.dec(); - - } - - /** - * Return a read-only view of the backing {@link ByteBuffer}. - * - * @return The read-only view -or- <code>null</code> if the - * {@link WriteCache} has been closed. 
- */ - ByteBuffer peek() { - - final ByteBuffer b = buf.get().buffer(); - - return b == null ? null : b.asReadOnlyBuffer(); - - } - - // /** - // * Return the buffer. No other thread will have access to the buffer. No - // * latch is established and there is no protocol for releasing the buffer - // * back. Instead, the buffer will become available again if the caller - // * releases the write lock. - // * - // * @throws IllegalMonitorStateException - // * unless the caller is holding the write lock. - // * @throws IllegalStateException - // * if the buffer reference has been cleared. - // */ - // protected ByteBuffer getExclusiveBuffer() { - // - // if (!lock.writeLock().isHeldByCurrentThread()) - // throw new IllegalMonitorStateException(); - // - // final ByteBuffer tmp = buf.get(); - // - // if (tmp == null) - // throw new IllegalStateException(); - // - // return tmp; - // - // } - - /** - * The metadata associated with a record in the {@link WriteCache}. - */ - public static class RecordMetadata { - - /** - * The offset of the record in the file. The offset may be relative to a - * base offset known to the writeOnChannel() implementation. - */ - public final long fileOffset; - - /** - * The offset within the {@link WriteCache}'s backing {@link ByteBuffer} - * of the start of the record. - */ - public final int bufferOffset; - - /** - * The length of the record in bytes as it will be written on the - * channel. If checksums are being written, then the length of the - * record has already been incorporated into this value. - */ - public final int recordLength; - - /** - * The RWStore latched address for the record. This can be used to - * recover the FixedAllocator. This field is only required for the - * RWStore and then only for HA. - */ - public final int latchedAddr; - - public RecordMetadata(final long fileOffset, final int bufferOffset, final int recordLength, final int latchedAddr) { - - this.fileOffset = fileOffset; - - this.bufferOffset = bufferOffset; - - this.recordLength = recordLength; - - this.latchedAddr = latchedAddr; - - } - - public String toString() { - - return getClass().getSimpleName() + "{fileOffset=" + fileOffset + ",off=" + bufferOffset + ",len=" - + recordLength + "}"; - - } - - } - - /** - * An index into the write cache used for read through on the cache. The - * keys are the file offsets that would be used to read the corresponding - * record. The values describe the position in buffer where that record is - * found and the length of the record. - */ - final private ConcurrentMap<Long, RecordMetadata> recordMap; - - /** - * The offset of the first record written onto the {@link WriteCache}. This - * information is used when {@link #appendOnly} is <code>true</code> as it - * gives the starting offset at which the entire {@link ByteBuffer} may be - * written in a single IO. When {@link #appendOnly} is <code>false</code> - * this is basically meaningless. This is initialized to <code>-1L</code> as - * a clear indicator that there is no valid record written yet onto the - * cache. - */ - final private AtomicLong firstOffset = new AtomicLong(-1L); - - /** - * Exposed to the WORM for HA support. - * - * @param firstOffset - * The first offset (from the HA message). - */ - protected void setFirstOffset(final long firstOffset) { - - this.firstOffset.set(firstOffset); - - } - - /** - * The capacity of the backing buffer. 
- */ - final private int capacity; - - /** - * When <code>true</code> {@link #close()} will release the - * {@link ByteBuffer} back to the {@link DirectBufferPool}. - */ - final private boolean releaseBuffer; - - /** - * A private instance used to compute the checksum of all data in the - * current {@link #buf}. This is enabled for the high availability write - * replication pipeline. The checksum over the entire {@link #buf} is - * necessary in this context to ensure that the receiver can verify the - * contents of the {@link #buf}. The per-record checksums CAN NOT be used - * for this purpose since large records may be broken across - */ - final private ChecksumHelper checker; - - /** - * The then current extent of the backing file as of the last record written - * onto the cache before it was written onto the write replication pipeline. - * The receiver is responsible for adjusting its local file size to match. - * - * @see WriteCacheService#setExtent(long) - */ - private final AtomicLong fileExtent = new AtomicLong(); - - /** - * m_closedForWrites is set when the buffer is about to be flushed and ensures that - * nothing will be appended to the buffer until it is reset for reuse. This - * fixes a problem in the HA Pipeline where deletes could append to the buffer resulting - * in a reported buffer length in the HAMessage greater than the data sent. - */ - private boolean m_closedForWrites = false; - - /** - * The sequence must be set when the cache is ready to be flushed. In HA this - * is sent down the pipeline to ensure correct synchronization when processing - * logged messages. - */ - private long sequence = -1; - - /** - * The sequence #of this {@link WriteCache} block within the current write - * set (origin ZERO(0)). This must be set when the cache is ready to be - * flushed. In HA this is sent down the pipeline to ensure correct - * synchronization when processing logged messages. This also winds up in - * the {@link IRootBlockView} as a summary of the #of {@link WriteCache} - * blocks transmitted during the write set for a specific commit point. - */ - void setSequence(final long i) { - sequence = i; - } - - /** - * Create a {@link WriteCache} from either a caller supplied buffer or a - * direct {@link ByteBuffer} allocated from the {@link DirectBufferPool}. - * <p> - * Note: The application MUST ensure that it {@link #close()}s the - * {@link WriteCache} or it can leak direct {@link ByteBuffer}s! - * <p> - * Note: NIO operations are performed using a direct {@link ByteBuffer} - * (that is, one use backing bytes are allocated on the C heap). When the - * caller supplies a {@link ByteBuffer} that is allocated on the Java heap - * as opposed to in native memory, a temporary direct {@link ByteBuffer} - * will be allocated for the IO operation by Java. The JVM can fail to - * release this temporary direct {@link ByteBuffer}, resulting in a memory - * leak. For this reason, the {@link WriteCache} SHOULD use a direct - * {@link ByteBuffer}. - * - * @see http://bugs.sun.com/bugdatabase/view_bug.do;jsessionid=8f - * ab76d1d4479fffffffffa5abfb09c719a30?bug_id=6210541 - * - * @param buf - * A {@link ByteBuffer} to be used as the write cache (optional). - * When <code>null</code> a buffer will be allocated for you from - * the {@link DirectBufferPool}. Buffers allocated on your behalf - * will be automatically released by {@link #close()}. - * @param scatteredWrites - * <code>true</code> iff the implementation uses scattered - * writes. 
The RW store uses scattered writes since its updates - * are written to different parts of the backing file. The WORM - * store does not since all updates are written to the end of the - * user extent in the backing file. - * @param useChecksum - * <code>true</code> iff the write cache will store the caller's - * checksum for a record and validate it on read. - * @param isHighlyAvailable - * when <code>true</code> the whole record checksum is maintained - * for use when replicating the write cache along the write - * pipeline. - * @param bufferHasData - * when <code>true</code> the caller asserts that the buffer has - * data (from a replicated write), in which case the position - * should be the start of the data in the buffer and the limit - * the #of bytes with valid data. when <code>false</code>, the - * caller's buffer will be cleared. The code presumes that the - * {@link WriteCache} instance will be used to lay down a single - * buffer worth of data onto the backing file. - * @param fileExtent - * The then current extent of the backing file. - * - * @throws InterruptedException - */ - public WriteCache(IBufferAccess buf, final boolean scatteredWrites, final boolean useChecksum, - final boolean isHighlyAvailable, final boolean bufferHasData, - final long fileExtent) throws InterruptedException { - - if (bufferHasData && buf == null) - throw new IllegalArgumentException(); - - if (buf == null) { - - buf = DirectBufferPool.INSTANCE.acquire(); - - this.releaseBuffer = true; - - } else { - - this.releaseBuffer = false; - - } - - // if (quorumManager == null) - // throw new IllegalArgumentException(); - - // this.quorumManager = quorumManager; - - this.useChecksum = useChecksum; - this.prefixWrites = scatteredWrites; - - if (isHighlyAvailable && !bufferHasData) { - // Note: No checker if buffer has data. - checker = new ChecksumHelper(); - } else { - checker = null; - } - - // save reference to the write cache. - this.buf = new AtomicReference<IBufferAccess>(buf); - - // the capacity of the buffer in bytes. - this.capacity = buf.buffer().capacity(); - - // apply the then current file extent. - this.fileExtent.set(fileExtent); - - /* - * Discard anything in the buffer, resetting the position to zero, the - * mark to zero, and the limit to the capacity. - */ - if (!bufferHasData) { - buf.buffer().clear(); - } - - /* - * An estimate of the #of records that might fit within the write cache. - * This is based on an assumption that the "average" record is 1k. This - * is used solely to assign the initial capacity of this map. - */ - final int indexDefaultCapacity = capacity / (1 * Bytes.kilobyte32); - - /* - * allocate and initialize the write cache index. - * - * For scattered writes we choose to use a sorted map so that we can - * easily flush writes to the file channel in order. This may not be - * important depending on the caching strategy of the underlying system - * but it cannot be a bad thing. - * - * If we do not need to support scattered writes then we have the option - * to use the ConcurrentHashMap which has the advantage of constant - * access time for read through support. - * - * TODO: some literature indicates the ConcurrentSkipListMap scales - * better with concurrency, so we should benchmark this option for - * non-scattered writes as well. 
- */ - if (scatteredWrites) { - recordMap = new ConcurrentSkipListMap<Long, RecordMetadata>(); - } else { - recordMap = new ConcurrentHashMap<Long, RecordMetadata>(indexDefaultCapacity); - } - - if (bufferHasData) { - /* - * Populate the record map from the record. - */ - resetRecordMapFromBuffer(); - } - - } - - /** - * Adds some debugging information. - */ - public String toString() { - - return super.toString()// - + "{recordCount=" + recordMap.size()// - + ",firstOffset=" + firstOffset// - + ",releaseBuffer=" + releaseBuffer// - + ",bytesWritten=" + bytesWritten()// - + ",bytesRemaining=" + remaining()// - + "}"; - - } - - /** - * The offset of the first record written onto the {@link WriteCache}. This - * information is used when {@link #appendOnly} is <code>true</code> as it - * gives the starting offset at which the entire {@link ByteBuffer} may be - * written in a single IO. When {@link #appendOnly} is <code>false</code> - * this is basically meaningless. - * <p> - * Note: This has been raised into the - * {@link #writeOnChannel(ByteBuffer, long, Map, long)} method signature. It - * has been reduced to a package private method so it will remain visible to - * the unit tests, otherwise it could become private. - * - * @return The first offset written into the {@link WriteCache} since it was - * last {@link #reset()} and <code>-1L</code> if nothing has been - * written since the {@link WriteCache} was created or was last - * {@link #reset()}. - */ - final long getFirstOffset() { - - return firstOffset.get(); - - } - - /** - * The maximum length of a record which could be inserted into the buffer. - * <p> - * Note: When checksums are enabled, this is 4 bytes less than the actual - * capacity of the underlying buffer since each record requires an - * additional four bytes for the checksum field. - */ - final public int capacity() { - - return capacity - (useChecksum ? 4 : 0) - (prefixWrites ? SIZEOF_PREFIX_WRITE_METADATA : 0); - - } - - /** - * Return the #of bytes remaining in the buffer. - * <p> - * Note: in order to rely on this value the caller MUST have exclusive - * access to the buffer. This API does not provide the means for acquiring - * that exclusive access. This is something that the caller has to arrange - * for themselves, which is why this is a package private method. - */ - final int remaining() { - - final int remaining = capacity - bytesWritten();//buf.get().buffer().position(); - - return remaining; - - } - - /** - * The #of bytes written on the backing buffer. - * <p> - * Note: in order to rely on this value the caller MUST have exclusive - * access to the buffer. This API does not provide the means for acquiring - * that exclusive access. This is something that the caller has to arrange - * for themselves, which is why this is a package private method. - */ - public final int bytesWritten() { - - return buf.get().buffer().position(); - - } - - /** - * Return <code>true</code> if there are no records buffered on the cache. - * Note: The caller MUST be holding a lock for this to be value. Probably - * the write lock. - * - * @todo This currently tests the {@link #recordMap}. In fact, for at least - * the {@link RWStore} the record map COULD be empty with cleared - * writes on the backing {@link ByteBuffer}. Therefore this tests - * whether the {@link WriteCache} has data to be written but does not - * clearly report whether or not some data has been written onto the - * buffer (and hence it has fewer bytes remaining than might otherwise - * be expected). 
- */ - final public boolean isEmpty() { - - return recordMap.isEmpty(); - - } - - /** - * Set the current extent of the backing file on the {@link WriteCache} - * object. When used as part of an HA write pipeline, the receiver is - * responsible for adjusting its local file size to match the file extent in - * each {@link WriteCache} message. - * - * @param fileExtent - * The current extent of the file. - * - * @throws IllegalArgumentException - * if the file extent is negative. - * - * @see WriteCacheService#setExtent(long) - */ - public void setFileExtent(final long fileExtent) { - - if (fileExtent < 0L) - throw new IllegalArgumentException(); - - this.fileExtent.set(fileExtent); - - } - - public long getFileExtent() { - - return fileExtent.get(); - - } - - /** - * Return the checksum of all data written into the backing buffer for this - * {@link WriteCache} instance since it was last {@link #reset()}. - * - * @return The running checksum of the data written into the backing buffer. - * - * @throws UnsupportedOperationException - * if the {@link WriteCache} is not maintaining this checksum - * (i.e., if <code>isHighlyAvailable := false</code> was - * specified to the constructor). - */ - public int getWholeBufferChecksum() { - - if (checker == null) - throw new UnsupportedOperationException(); - - return checker.getChecksum(); - - } - - /** - * {@inheritDoc} - * - * @throws IllegalStateException - * If the buffer is closed. - * @throws IllegalArgumentException - * If the caller's record is larger than the maximum capacity of - * cache (the record could not fit within the cache). The caller - * should check for this and provide special handling for such - * large records. For example, they can be written directly onto - * the backing channel. - */ - public boolean write(final long offset, final ByteBuffer data, final int chk) throws InterruptedException { - - return write(offset, data, chk, true/* writeChecksum */,0/*latchedAddr*/); - - } - - /** - * - * @param offset - * @param data - * @param chk - * @param writeChecksum - * The checksum is appended to the record IFF this argument is - * <code>true</code> and checksums are in use. - * @return - * @throws InterruptedException - */ - boolean write(final long offset, final ByteBuffer data, final int chk, boolean writeChecksum, final int latchedAddr) - throws InterruptedException { - - // Note: The offset MAY be zero. This allows for stores without any - // header block. - - if (m_written) { // should be clean, NO WAY should this be written to! - log.error("Writing to CLEAN cache: " + hashCode()); - throw new IllegalStateException("Writing to CLEAN cache: " + hashCode()); - } - - if (data == null) - throw new IllegalArgumentException(AbstractBufferStrategy.ERR_BUFFER_NULL); - - final WriteCacheCounters counters = this.counters.get(); - - final ByteBuffer tmp = acquire(); - - try { - - final int remaining = data.remaining(); - - // The #of bytes to transfer into the write cache. - final int datalen = remaining + (writeChecksum && useChecksum ? 4 : 0); - final int nwrite = datalen + (prefixWrites ? SIZEOF_PREFIX_WRITE_METADATA : 0); - - if (nwrite > capacity) { - // This is more bytes than the total capacity of the buffer. - throw new IllegalArgumentException(AbstractBufferStrategy.ERR_BUFFER_OVERRUN); - - } - - if (remaining == 0) - throw new IllegalArgumentException(AbstractBufferStrategy.ERR_BUFFER_EMPTY); - - /* - * Note: We need to be synchronized on the ByteBuffer here since - * this operation relies on the position() being stable. 
- * - * Note: Also see clearAddrMap(long) which is synchronized on the - * acquired ByteBuffer in the same manner to protect it during - * critical sections which have a side effect on the buffer - * position. - */ - final int pos; - synchronized (tmp) { - - // the position() at which the record is cached in the buffer. - final int spos = tmp.position(); - - if (spos + nwrite > capacity) { - - /* - * There is not enough room left in the write cache for this - * record. - */ - - return false; - - } - - // add prefix data if required and set data position in buffer - if (prefixWrites) { - tmp.putLong(offset); - tmp.putInt(datalen); - tmp.putInt(latchedAddr); - pos = spos + SIZEOF_PREFIX_WRITE_METADATA; - } else { - pos = spos; - } - - tmp.put(data); - - // copy the record into the cache, updating position() as we go. - // TODO: Note that the checker must be invalidated if a RWCache - // "deletes" an entry - // by zeroing an address. - if (checker != null) { - // update the checksum (no side-effects on [data]) - final ByteBuffer chkBuf = tmp.asReadOnlyBuffer(); - chkBuf.position(spos); - chkBuf.limit(tmp.position()); - checker.update(chkBuf); - } - - // write checksum - if any - if (writeChecksum && useChecksum) { - tmp.putInt(chk); - if (checker != null) { - // update the running checksum to include this too. - checker.update(chk); - } - } - - // set while synchronized since no contention. - firstOffset.compareAndSet(-1L/* expect */, offset/* update */); - - // update counters while holding the lock. - counters.naccept++; - counters.bytesAccepted += nwrite; - - } // synchronized(tmp) - - /* - * Add metadata for the record so it can be read back from the - * cache. - */ - if (recordMap.put(Long.valueOf(offset), new RecordMetadata(offset, pos, datalen, latchedAddr)) != null) { - /* - * Note: This exception indicates that the abort protocol did - * not reset() the current write cache before new writes were - * laid down onto the buffer. - */ - throw new AssertionError("Record exists for offset in cache: offset=" + offset); - } - - if (log.isTraceEnabled()) { // @todo rather than hashCode() set a - // buffer# on each WriteCache instance. - log.trace("offset=" + offset + ", pos=" + pos + ", nwrite=" + nwrite + ", writeChecksum=" - + writeChecksum + ", useChecksum=" + useChecksum + ", nrecords=" + recordMap.size() - + ", hashCode=" + hashCode()); - } - - return true; - - } finally { - - release(); - - } - - } - - /** - * {@inheritDoc} - * - * @throws IllegalStateException - * If the buffer is closed. - */ - public ByteBuffer read(final long offset) throws InterruptedException, ChecksumError { - - final WriteCacheCounters counters = this.counters.get(); - - final ByteBuffer tmp = acquire(); - - try { - - // Look up the metadata for that record in the cache. - final RecordMetadata md; - if ((md = recordMap.get(offset)) == null) { - - // The record is not in this write cache. - counters.nmiss.increment(); - - return null; - } - - // length of the record w/o checksum field. - final int reclen = md.recordLength - (useChecksum ? 4 : 0); - - // the start of the record in writeCache. - final int pos = md.bufferOffset; - - // create a view with same offset, limit and position. - final ByteBuffer view = tmp.duplicate(); - - // adjust the view to just the record of interest. 
- view.limit(pos + reclen); - view.position(pos); - - // System.out.println("WriteCache, addr: " + offset + ", from: " + - // pos + ", " + md.recordLength + ", thread: " + - // Thread.currentThread().getId()); - /* - * Copy the data into a newly allocated buffer. This is necessary - * because our hold on the backing ByteBuffer for the WriteCache is - * only momentary. As soon as we release() the buffer the data in - * the buffer could be changed. - */ - - final byte[] b = new byte[reclen]; - - final ByteBuffer dst = ByteBuffer.wrap(b); - - // copy the data into [dst] (and the backing byte[]). - dst.put(view); - - // flip buffer for reading. - dst.flip(); - - if (useChecksum) { - - final int chk = tmp.getInt(pos + reclen); - - if (chk != ChecksumUtility.threadChk.get().checksum(b, 0/* offset */, reclen)) { - - // Note: [offset] is a (possibly relative) file offset. - throw new ChecksumError(checkdata()); - - } - - } - - counters.nhit.increment(); - - if (log.isTraceEnabled()) { - log.trace(show(dst, "read bytes")); - } - - return dst; - - } finally { - - release(); - - } - - } - - /** - * Dump some metadata and leading bytes from the buffer onto a - * {@link String}. - * - * @param buf - * The buffer. - * @param prefix - * A prefix for the dump. - * - * @return The {@link String}. - */ - private String show(final ByteBuffer buf, final String prefix) { - final StringBuffer str = new StringBuffer(); - int tpos = buf.position(); - if (tpos == 0) { - tpos = buf.limit(); - } - str.append(prefix + ", length: " + tpos + " : "); - for (int tb = 0; tb < tpos && tb < 20; tb++) { - str.append(Integer.toString(buf.get(tb)) + ","); - } - // log.trace(str.toString()); - return str.toString(); - } - - // private String show(final byte[] buf, int len, final String prefix) { - // final StringBuffer str = new StringBuffer(); - // str.append(prefix + ": "); - // int tpos = len; - // str.append(prefix + ", length: " + tpos + " : "); - // for (int tb = 0; tb < tpos && tb < 20; tb++) { - // str.append(Integer.toString(buf[tb]) + ","); - // } - // // log.trace(str.toString()); - // return str.toString(); - // } - - /** - * Flush the writes to the backing channel but DOES NOT sync the channel and - * DOES NOT {@link #reset()} the {@link WriteCache}. {@link #reset()} is a - * separate operation because a common use is to retain recently flushed - * instances for read-back. - * - * @param force - * When <code>true</code>, the data will be forced to stable - * media. - * - * @throws IOException - * @throws InterruptedException - */ - public void flush(final boolean force) throws IOException, InterruptedException { - - try { - - if (!flush(force, Long.MAX_VALUE, TimeUnit.NANOSECONDS)) { - - throw new RuntimeException(); - - } - - } catch (TimeoutException e) { - - throw new RuntimeException(e); - - } - - } - - /** - * Flush the writes to the backing channel but DOES NOT sync the channel and - * DOES NOT {@link #reset()} the {@link WriteCache}. {@link #reset()} is a - * separate operation because a common use is to retain recently flushed - * instances for read-back. - * - * @param force - * When <code>true</code>, the data will be forced to stable - * media. - * - * @throws IOException - * @throws TimeoutException - * @throws InterruptedException - */ - public boolean flush(final boolean force, final long timeout, - final TimeUnit unit) throws IOException, TimeoutException, - InterruptedException { - - // start time - final long begin = System.nanoTime(); - - // total nanoseconds to wait. 
- final long nanos = unit.toNanos(timeout); - - // remaining nanoseconds to wait. - long remaining = nanos; - - final WriteCacheCounters counters = this.counters.get(); - - final Lock writeLock = lock.writeLock(); - - if (!writeLock.tryLock(remaining, TimeUnit.NANOSECONDS)) { - - return false; - - } - - try { - - final ByteBuffer tmp = this.buf.get().buffer(); - - if (tmp == null) - throw new IllegalStateException(); - - // #of bytes to write on the disk. - final int nbytes = tmp.position(); - - if (log.isTraceEnabled()) - log.trace("nbytes=" + nbytes + ", firstOffset=" - + getFirstOffset() + ", nflush=" + counters.nflush); - - if (nbytes == 0) { - - // NOP. - return true; - - } - - /* - * Create a view with same offset, limit and position. - * - * Note: The writeOnChannel method is given the view. This prevents - * it from adjusting the position() on the backing buffer. - */ - { - - final ByteBuffer view = tmp.duplicate(); - - // adjust the view to just the dirty record. - view.limit(nbytes); - view.position(0); - - // remaining := (total - elapsed). - remaining = nanos - (System.nanoTime() - begin); - - // write the data on the disk file. - final boolean ret = writeOnChannel(view, getFirstOffset(), - Collections.unmodifiableMap(recordMap), remaining); - - if (!ret) { - throw new TimeoutException("Unable to flush WriteCache"); - } - - counters.nflush++; - - return ret; - - } - - } finally { - - writeLock.unlock(); - - } - - } - - /** - * Debug routine logs @ ERROR additional information when a checksum error - * has been encountered. - * - * @return An informative error message. - * - * @throws InterruptedException - * @throws IllegalStateException - */ - private String checkdata() throws IllegalStateException, InterruptedException { - - if (!useChecksum) { - return "Unable to check since checksums are not enabled"; - } - - ByteBuffer tmp = acquire(); - try { - int nerrors = 0; - int nrecords = recordMap.size(); - - for (Entry<Long, RecordMetadata> ent : recordMap.entrySet()) { - - final RecordMetadata md = ent.getValue(); - - // length of the record w/o checksum field. - final int reclen = md.recordLength - 4; - - // the start of the record in writeCache. - final int pos = md.bufferOffset; - - final int chk = tmp.getInt(pos + reclen); - - // create a view with same offset, limit and position. - final ByteBuffer view = tmp.duplicate(); - - // adjust the view to just the record of interest. - view.limit(pos + reclen); - view.position(pos); - - final byte[] b = new byte[reclen]; - - final ByteBuffer dst = ByteBuffer.wrap(b); - - // copy the data into [dst] (and the backing byte[]). - dst.put(view); - if (chk != ChecksumUtility.threadChk.get().checksum(b, 0/* offset */, reclen)) { - log.error("Bad data for address: " + ent.getKey()); - nerrors++; - } - - } - return "WriteCache checkdata - records: " + nrecords + ", errors: " + nerrors; - } finally { - release(); - } - } - - /** - * Write the data from the buffer onto the channel. This method provides a - * uniform means to request that the buffer write itself onto the backing - * channel, regardless of whether the channel is backed by a file, a socket, - * etc. - * <p> - * Implementations of this method MAY support gathered writes, depending on - * the channel. The necessary information to perform a gathered write is - * present in the <i>recordMap</i>. 
On the other hand, the implementation - * MAY require that the records in the cache are laid out for a WORM, in - * which case {@link #getFirstOffset()} provides the starting offset for the - * data to be written. The application MUST coordinate the requirements for - * a R/W or WORM store with the use of the {@link WriteCache} and the means - * to write on the backing channel. - * - * @param buf - * The data to be written. Only the dirty bytes are visible in - * this view. The implementation should write all bytes from the - * current position to the limit. - * @param firstOffset - * The offset of the first record in the recordMap into the file - * (may be relative to a base offset within the file). This is - * provided as an optimization for the WORM which writes its - * records contiguously on the backing store. - * @param recordMap - * The mapping of record offsets onto metadata about those - * records. - * @param nanos - * The timeout for the operation in nanoseconds. - * - * @return <code>true</code> if the operation was completed successfully - * within the time alloted. - * - * @throws InterruptedException - * if the thread was interrupted. - * @throws IOException - * if there was an IO problem. - */ - abstract protected boolean writeOnChannel(final ByteBuffer buf, final long firstOffset, - final Map<Long, RecordMetadata> recordMap, final long nanos) throws InterruptedException, TimeoutException, - IOException; - - /** - * {@inheritDoc}. - * <p> - * This implementation clears the buffer, the record map, and other internal - * metadata such that the {@link WriteCache} is prepared to receive new - * writes. - * - * @throws IllegalStateException - * if the write cache is closed. - */ - public void reset() throws InterruptedException { - - final Lock writeLock = lock.writeLock(); - - writeLock.lockInterruptibly(); - - try { - - // // wait until there are no readers using the buffer. - // latch.await(); - - final ByteBuffer tmp = buf.get().buffer(); - - if (tmp == null) { - - // Already closed. - throw new IllegalStateException(); - - } - - // reset all state. - _resetState(tmp); - - } finally { - - writeLock.unlock(); - - } - - } - - /** - * Permanently take the {@link WriteCache} instance out of service. If the - * buffer was allocated by the {@link WriteCache} then it is released back - * to the {@link DirectBufferPool}. After this method is called, records can - * no longer be read from nor written onto the {@link WriteCache}. It is - * safe to invoke this method more than once. - * <p> - * Concurrent {@link #read(long, int)} requests will be serviced if the - * already hold the the read lock but requests will fail once the - * - * @throws InterruptedException - */ - public void close() throws InterruptedException { - - final Lock writeLock = lock.writeLock(); - - writeLock.lockInterruptibly(); - - try { - - // // wait until there are no readers using the buffer. - // latch.await(); - - /* - * Note: This method is thread safe. Only one thread will manage to - * clear the AtomicReference and it will do the rest of the work as - * well. - */ - - // position := 0; limit := capacity. - final IBufferAccess tmp = buf.get(); - - if (tmp == null) { - - // Already closed. 
- return; - - } - - if (buf.compareAndSet(tmp/* expected */, null/* update */)) { - - try { - - _resetState(tmp.buffer()); - - } finally { - - if (releaseBuffer) { - - tmp.release(); - - } - - } - - } - - } finally { - - writeLock.unlock(); - - } - - } - - /** - * Reset the internal state of the {@link WriteCache} in preparation to - * reuse it to receive more writes. - * <p> - * Note: Keep private unless strong need for override since you can not call - * this method without holding the write lock - * - * @param tmp - */ - private void _resetState(final ByteBuffer tmp) { - - if (tmp == null) - throw new IllegalArgumentException(); - - if (!lock.writeLock().isHeldByCurrentThread()) { - // The caller must be holding the write lock. - throw new IllegalMonitorStateException(); - } - - // clear the index since all records were flushed to disk. - recordMap.clear(); - - // clear to well known invalid offset. - firstOffset.set(-1L); - - // position := 0; limit := capacity. - tmp.clear(); - - if (checker != null) { - - // reset the running checksum of the data written onto the backing - // buffer. - checker.reset(); - - } - - // Martyn: I moved your debug flag here so it is always cleared by - // reset(). - m_written = false; - - m_closedForWrites = false; - - } - - /** - * Return the RMI message object that will accompany the payload from the - * {@link WriteCache} when it is replicated along the write pipeline. - * - * @return cache A {@link WriteCache} to be replicated. - */ - final public IHAWriteMessage newHAWriteMessage(// - final long quorumToken, - final long lastCommitCounter,// - final long lastCommitTime// - ) { - - return new HAWriteMessage( - lastCommitCounter,// - lastCommitTime,// - sequence, // - bytesWritten(), getWholeBufferChecksum(), - prefixWrites ? StoreTypeEnum.RW : StoreTypeEnum.WORM, - quorumToken, fileExtent.get(), firstOffset.get()); - - } - - /** - * The current performance counters. - */ - protected final AtomicReference<WriteCacheCounters> counters = new AtomicReference<WriteCacheCounters>( - new WriteCacheCounters()); - - /** - * Sets the performance counters to be used by the write cache. A service - * should do this if you want to aggregate the performance counters across - * multiple {@link WriteCache} instances. - * - * @param newVal - * The shared performance counters. - * - * @throws IllegalArgumentException - * if the argument is <code>null</code>. - */ - void setCounters(final WriteCacheCounters newVal) { - - if (newVal == null) - return; - - this.counters.set(newVal); - - } - - /** - * Return the performance counters for the write cacher. - */ - public CounterSet getCounters() { - - return counters.get().getCounters(); - - } - - /** - * Performance counters for the {@link WriteCache}. - * <p> - * Note: thread-safety is required for: {@link #nhit} and {@link #nmiss}. - * The rest should be Ok without additional synchronization, CAS operators, - * etc (mainly because they are updated while holding a lock). - * - * @author <a href="mailto:tho...@us...">Bryan - * Thompson</a> - */ - public static class WriteCacheCounters { - - /* - * read on the cache. - */ - - /** - * #of read requests that are satisfied by the write cache. - */ - public final CAT nhit = new CAT(); - - /** - * The #of read requests that are not satisfied by the write cache. - */ - public final CAT nmiss = new CAT(); - - /* - * write on the cache. - */ - - /** - * #of records accepted for eventual write onto the backing channel. 
- */ - public long naccept; - - /** - * #of bytes accepted for eventual write onto the backing channel. - */ - public long bytesAccepted; - - /* - * write on the channel. - */ - - /** - * #of times {@link IWriteCache#flush(boolean)} was called. - */ - public long nflush; - - /** - * #of writes on the backing channel. Note that some write cache - * implementations do ordered writes and will therefore do one write per - * record while others do append only and therefore do one write per - * write cache flush. Note that in both cases we may have to redo a - * write if the backing channel was concurrently closed, so the value - * here can diverge from the #of accepted records and the #of requested - * flushes. - */ - public long nwrite; - - /** - * #of bytes written onto the backing channel. - */ - public long bytesWritten; - - /** - * Total elapsed time writing onto the backing channel. - */ - public long elapsedWriteNanos; - - public CounterSet getCounters() { - - final CounterSet root = new CounterSet(); - - /* - * read on the cache. - */ - - root.addCounter("nhit", new Instrument<Long>() { - public void sample() { - setValue(nhit.get()); - } - }); - - root.addCounter("nmiss", new Instrument<Long>() { - public void sample() { - setValue(nmiss.get()); - } - }); - - root.addCounter("hitRate", new Instrument<Double>() { - public void sample() { - final long nhit = WriteCacheCounters.this.nhit.get(); - final long ntests = nhit + WriteCacheCounters.this.nmiss.get(); - setValue(ntests == 0L ? 0d : (double) nhit / ntests); - } - }); - - /* - * write on the cache. - */ - - // #of records accepted by the write cache. - root.addCounter("naccept", new Instrument<Long>() { - public void sample() { - setValue(naccept); - } - }); - - // #of bytes in records accepted by the write cache. - root.addCounter("bytesAccepted", new Instrument<Long>() { - public void sample() { - setValue(bytesAccepted); - } - }); - - /* - * write on the channel. - */ - - // #of times the write cache was flushed to the backing channel. - root.addCounter("nflush", new Instrument<Long>() { - public void sample() { - setValue(nflush); - } - }); - - // #of writes onto the backing channel. - root.addCounter("nwrite", new Instrument<Long>() { - public void sample() { - setValue(nwrite); - } - }); - - // #of bytes written onto the backing channel. - root.addCounter("bytesWritten", new Instrument<Long>() { - public void sample() { - setValue(bytesWritten); - } - }); - - // average bytes per write (will under report if we must retry - // writes). - root.addCounter("bytesPerWrite", new Instrument<Double>() { - public void sample() { - final double bytesPerWrite = (nwrite == 0 ? 0d : (bytesWritten / (double) nwrite)); - setValue(bytesPerWrite); - } - }); - - // elapsed time writing on the backing channel. - root.addCounter("writeSecs", new Instrument<Double>() { - public void sample() { - setValue(elapsedWriteNanos / 1000000000.); - } - }); - - return root; - - } // getCounters() - - public String toString() { - - return getCounters().toString(); - - } - - } // class WriteCacheCounters - - /** - * A {@link WriteCache} implementation suitable for an append-only file such - * as the {@link WORMStrategy} or the output file of the - * {@link IndexSegmentBuilder}. - * - * @author <a href="mailto:tho...@us...">Bryan - * Thompson</a> - */ - public static class FileChannelWriteCache extends WriteCache { - - /** - * An offset which will be applied to each record written onto the - * backing {@link FileChannel}. 
The offset is generally the size of the - * root blocks for a journal or the checkpoint record for an index - * segment. It can be zero if you do not have anything at the head of - * the file. - * <p> - * Note: This implies that writing the root blocks is done separately in - * the protocol since you can't write below this offset otherwise. - */ - final protected long baseOffset; - - /** - * Used to re-open the {@link FileChannel} in this class. - */ - public final IReopenChannel<FileChannel> opener; - - /** - * @param baseOffset - * An offset - * @param buf - * @param opener - * - * @throws InterruptedException - */ - public FileChannelWriteCache(final long baseOffset, - final IBufferAccess buf, final boolean useChecksum, - final boolean isHighlyAvailable, final boolean bufferHasData, - final IReopenChannel<FileChannel> opener, - final long fileExtent) - throws InterruptedException { - - super(buf, false/* scatteredWrites */, useChecksum, - isHighlyAvailable, bufferHasData, fileExtent); - - if (baseOffset < 0) - throw new IllegalArgumentException(); - - if (opener == null) - throw new IllegalArgumentException(); - - this.baseOffset = baseOffset; - - this.opener = opener; - - } - - @Override - protected boolean writeOnChannel(final ByteBuffer data, - final long firstOffset, - final Map<Long, RecordMetadata> recordMap, final long nanos) - throws InterruptedException, IOException { - - final long begin = System.nanoTime(); - - final int nbytes = data.remaining(); - - /* - * The position in the file at which the record will be written. - */ - final long pos = baseOffset + firstOffset; - - /* - * Write bytes in [data] from position to limit onto the channel. - * - * @todo This ignores the timeout. - */ - final int nwrites = FileChannelUtility.writeAll(opener, data, pos); - - final WriteCacheCounters counters = this.counters.get(); - counters.nwrite += nwrites; - counters.bytesWritten += nbytes; - counters.elapsedWriteNanos += (System.nanoTime() - begin); - - return true; - - } - - } - - /** - * The scattered write cache is used by the {@link RWStore} since the writes - * can be made to any part of the file assigned for data allocation. - * <p> - * The writeonChannel must therefore utilize the {@link RecordMetadata} to - ... [truncated message content] |
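The truncated hunk above covers the WriteCache constructor together with its write(), read(), flush() and reset() paths. For orientation while reading that diff: when prefixWrites is enabled (the scattered-write mode used by the RWStore), each record is laid into the backing ByteBuffer as a 16-byte prefix of [fileOffset:8][recordLength:4][latchedAddr:4], followed by the payload and, when checksums are enabled, a trailing 4-byte checksum that is counted inside recordLength. The sketch below illustrates only that layout; the class and method names are hypothetical, and java.util.zip.CRC32 stands in for bigdata's ChecksumUtility.

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

/**
 * Minimal sketch (not bigdata code) of the on-buffer record layout used by
 * the prefix-write (scattered) WriteCache described in the diff above:
 *
 *   [fileOffset:8][recordLength:4][latchedAddr:4][payload][checksum:4]
 *
 * where recordLength counts the payload plus the 4-byte checksum when
 * checksums are enabled.
 */
public class PrefixRecordLayoutSketch {

    // 8 (fileOffset) + 4 (recordLength) + 4 (latchedAddr)
    static final int SIZEOF_PREFIX = 16;

    /** Lay one record into the cache buffer; returns false if it does not fit. */
    static boolean putRecord(final ByteBuffer cache, final long fileOffset,
            final int latchedAddr, final byte[] payload) {
        final int recordLength = payload.length + 4;      // payload + checksum
        final int nwrite = SIZEOF_PREFIX + recordLength;  // total footprint in the cache
        if (cache.remaining() < nwrite)
            return false;                                 // caller moves on to a fresh cache buffer
        final CRC32 chk = new CRC32();                    // stand-in for ChecksumUtility
        chk.update(payload);
        cache.putLong(fileOffset);                        // where the record lands in the backing file
        cache.putInt(recordLength);                       // length including the checksum field
        cache.putInt(latchedAddr);                        // RWStore latched address
        cache.put(payload);                               // the application data
        cache.putInt((int) chk.getValue());               // record checksum
        return true;
    }

    public static void main(String[] args) {
        final ByteBuffer cache = ByteBuffer.allocate(1024);
        putRecord(cache, 4096L, 12345, "hello".getBytes());
        System.out.println("bytesWritten=" + cache.position());
    }
}

A record that would overflow the buffer is simply rejected, mirroring the capacity check in write() above that lets the caller roll over to another WriteCache rather than splitting a record across buffers.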
From: <tho...@us...> - 2012-11-28 16:32:55
Revision: 6730 http://bigdata.svn.sourceforge.net/bigdata/?rev=6730&view=rev Author: thompsonbry Date: 2012-11-28 16:32:48 +0000 (Wed, 28 Nov 2012) Log Message: ----------- The bug identified above has been fixed. Compaction now works with HA. We send along the address notifications and address recycle notices in an HA message to the followers. @see https://sourceforge.net/apps/trac/bigdata/ticket/621 (Coalesce records in write cache) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2012-11-27 00:08:51 UTC (rev 6729) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2012-11-28 16:32:48 UTC (rev 6730) @@ -33,6 +33,8 @@ import java.util.Collection; import java.util.Collections; import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; @@ -293,6 +295,16 @@ */ public final int latchedAddr; + /** + * Set <code>true</code> when the record is deleted. + * <p> + * Note: The {@link RecordMetadata} is removed from the + * {@link WriteCache#recordMap} when the record is deleted. This flag is + * only visible if the {@link RecordMetadata} was entered onto the + * {@link WriteCache#orderedRecords} list. + */ + private volatile boolean deleted; + public RecordMetadata(final long fileOffset, final int bufferOffset, final int recordLength, final int latchedAddr) { @@ -304,12 +316,15 @@ this.latchedAddr = latchedAddr; + this.deleted = false; + } public String toString() { return getClass().getSimpleName() + "{fileOffset=" + fileOffset - + ",off=" + bufferOffset + ",len=" + recordLength + "}"; + + ",bufferOffset=" + bufferOffset + ",len=" + recordLength + + ", delete=" + deleted + "}"; } @@ -324,6 +339,18 @@ final private ConcurrentMap<Long/* fileOffset */, RecordMetadata> recordMap; /** + * An ordered list of the {@link RecordMetadata} in the order in which those + * records were created. This is maintained only for HA. It is used to + * communicate the allocations and deletes to a downstream RWS HA follower. + * The RWS follower relies on the ordered presentation of the addresses to + * infer the order in which the allocators were created, the size of the + * regions managed by those allocators, and the order in which the + * allocators appear in the allocator list (this is the same as the order of + * the creation of those allocators). + */ + final private List<RecordMetadata> orderedRecords; + + /** * The offset of the first record written onto the {@link WriteCache}. 
This * information is used when {@link #appendOnly} is <code>true</code> as it * gives the starting offset at which the entire {@link ByteBuffer} may be @@ -533,6 +560,21 @@ recordMap = new ConcurrentHashMap<Long, RecordMetadata>(indexDefaultCapacity); } + if (isHighlyAvailable && !bufferHasData) { + + /* + * Only in HA mode, and not when we are processing a raw write cache + * buffer replicated from the leader. + */ + + orderedRecords = new LinkedList<WriteCache.RecordMetadata>(); + + } else { + + orderedRecords = null; + + } + if (bufferHasData) { /* * Populate the record map from the record. @@ -637,7 +679,7 @@ * buffer (and hence it has fewer bytes remaining than might otherwise * be expected). */ - final public boolean isEmpty() { + final boolean isEmpty() { return recordMap.isEmpty(); @@ -861,7 +903,11 @@ * Add metadata for the record so it can be read back from the * cache. */ - if (recordMap.put(Long.valueOf(offset), new RecordMetadata(offset, pos, datalen, latchedAddr)) != null) { + + final RecordMetadata md = new RecordMetadata(offset, pos, datalen, + latchedAddr); + + if (recordMap.put(Long.valueOf(offset), md) != null) { /* * Note: This exception indicates that the abort protocol did * not reset() the current write cache before new writes were @@ -869,7 +915,13 @@ */ throw new AssertionError("Record exists for offset in cache: offset=" + offset); } + + if (orderedRecords != null) { + orderedRecords.add(md); + + } + if (log.isTraceEnabled()) { // @todo rather than hashCode() set a // buffer# on each WriteCache instance. log.trace("offset=" + offset + ", pos=" + pos + ", nwrite=" + nwrite + ", writeChecksum=" @@ -1410,6 +1462,9 @@ // clear the index since all records were flushed to disk. recordMap.clear(); + + if (orderedRecords != null) + orderedRecords.clear(); // clear to well known invalid offset. firstOffset.set(-1L); @@ -1732,37 +1787,79 @@ final Map<Long, RecordMetadata> recordMap) { recordMap.clear(); -// final int sp = buf.position(); // start position. final int limit = buf.limit(); // end position. int pos = buf.position(); // start position -// buf.limit(sp); -// int nwrite = 0; while (pos < limit) { buf.position(pos); - final long addr = buf.getLong(); // 8 bytes - if (addr == 0L) { // end of content - break; - } - final int sze = buf.getInt(); // 4 bytes. - final int latchedAddr = buf.getInt(); // 4 bytes. - if (sze == 0 /* old style deleted */) { - /* - * Should only happen if a previous write was already made - * to the buffer but the allocation has since been freed. - */ - recordMap.remove(addr); - removeAddress(latchedAddr); - } else if (addr < 0 /* new style deleted */) { - if (recordMap.get(addr) != null) { + // 8 bytes (negative iff record is deleted) + final long fileOffset = buf.getLong(); + assert fileOffset != 0L; + // 4 bytes (negative iff no data follows) + final int recordLength = buf.getInt(); + assert recordLength != 0; + // 4 bytes + final int latchedAddr = buf.getInt(); +// if (sze == 0 /* old style deleted */) { +// /* +// * Should only happen if a previous write was already made +// * to the buffer but the allocation has since been freed. +// */ +// recordMap.remove(addr); +// removeAddress(latchedAddr); + if (fileOffset < 0 /* new style deleted */) { + if (recordMap.get(fileOffset) != null) { // Should have been removed already. 
throw new AssertionError(); } - } else if (sze > 0) { - recordMap.put(addr, new RecordMetadata(addr, pos + SIZEOF_PREFIX_WRITE_METADATA, sze, latchedAddr)); - addAddress(latchedAddr, sze); + /* + * Make sure that the address is declared. This covers the + * case where a record is allocated and then recycled before + * the WriteCache in which it was recorded is evicted from + * the dirtyList. This can happen when we are not + * compacting, as well as when we are compacting. + * + * Note: RWS will interpret a -recordLength as notification + * of the existence of an allocator for that address but + * will not create an actual allocation for that address at + * this time. + */ + // Ensure allocator exists (allocation may or may not be + // created). + addAddress(latchedAddr, recordLength); + if (recordLength > 0) { + // Delete allocation. + removeAddress(latchedAddr); + } + } else { + /* + * Note: Do not enter things into [orderedRecords] on the + * follower. + */ + if (recordLength < 0) { + /* + * Notice of allocation. + * + * Note: recordLength is always negative for this code + * path. The RWS will interpret the -recordLength as + * notification of the existence of an allocator for + * that address but will not create an actual allocation + * for that address at this time. + */ + addAddress(latchedAddr, recordLength); + } else { + /* + * Actual allocation with data. + */ + final RecordMetadata md = new RecordMetadata( + fileOffset, pos + SIZEOF_PREFIX_WRITE_METADATA, + recordLength, latchedAddr); + recordMap.put(fileOffset, md); + addAddress(latchedAddr, recordLength); + } } -// nwrite++; - pos += SIZEOF_PREFIX_WRITE_METADATA + sze; // skip header (addr + sze) and data + // skip header (addr + sze + latchedAddr) and data (if any) + pos += (SIZEOF_PREFIX_WRITE_METADATA + (recordLength > 0 ? recordLength + : 0)); } } @@ -1865,6 +1962,8 @@ throw new AssertionError(); } + removed.deleted = true; + if (!prefixWrites) { /* * We will not record a deleted record. We are not in HA mode. @@ -2104,10 +2203,13 @@ * This method handles prefixWrites and useChecksum to transfer the correct * bytes for the associated {@link RecordMetadata}. * + * @param src + * The source buffer. * @param dst * Records are transferred into the <i>dst</i> {@link WriteCache} * . - * @param writeCacheService + * @param serviceRecordMap + * The {@link WriteCacheService}'s record map. * * @return Returns true if the transfer is complete, or false if the * destination runs out of room. @@ -2259,6 +2361,96 @@ lock.unlock(); } } + + /** + * Apply the {@link #orderedRecords} to create a dense {@link WriteCache} + * buffer that presents the addresses from the {@link #recordMap} along with + * enough metadata to decide whether this is a delete or merely an address + * declaration. Address declarations are modeled by setting the record size + * to a negative value. Address deletes are modeled by setting the + * fileOffset to a negative value. Actual address writes are not + * communicated through this method, but their data will eventually make it + * to the follower if the address is not recycled before the + * {@link WriteCache} holding that data is communicated to the follower (in + * which case the follower will eventually see the delete marker for the + * address instead of the application data for the address). + * + * @return true unless there is nothing in the {@link WriteCache}. 
+ * + * @throws InterruptedException + * @throws IllegalStateException + */ + boolean prepareAddressMetadataForHA() throws IllegalStateException, + InterruptedException { + + if (!prefixWrites) + throw new IllegalStateException(); + + if (orderedRecords == null) + throw new IllegalStateException(); + + if (orderedRecords.isEmpty()) { + + return false; + + } + + final ByteBuffer tmp = acquire(); + + try { + + /* + * Note: We need to be synchronized on the ByteBuffer here + * since this operation relies on the position() being + * stable. + * + * Note: Also see clearAddrMap(long) which is synchronized + * on the acquired ByteBuffer in the same manner to protect + * it during critical sections which have a side effect on + * the buffer position. + */ + + synchronized (tmp) { + + tmp.position(0); + tmp.limit(tmp.capacity()); + + for (RecordMetadata md : orderedRecords) { + + if (md.deleted) { + /* + * Entry is address of deleted record. No application + * data follows the entry (the next thing in the buffer + * will be another entry). + */ + tmp.putLong(-md.fileOffset); + tmp.putInt(-md.recordLength); + } else { + /* + * Entry is notice of non-deleted address. No + * application data follows the entry (the next thing in + * the buffer will be another entry). + */ + tmp.putLong(md.fileOffset); + tmp.putInt(-md.recordLength); + } + tmp.putInt(md.latchedAddr); + + } // next RecordMetadata + + } // synchronized(tmp) + + orderedRecords.clear(); + + return true; + + } finally { + + release(); + + } + + } /** * Overridden by Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2012-11-27 00:08:51 UTC (rev 6729) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2012-11-28 16:32:48 UTC (rev 6730) @@ -485,7 +485,7 @@ this.useChecksum = useChecksum; - this.compactionEnabled = compactionThreshold < 100; + this.compactionEnabled = canCompact() && compactionThreshold < 100; // this.opener = opener; @@ -505,7 +505,7 @@ /* * Configure the desired dirtyListThreshold. */ - if(canCompact()){ + if (compactionEnabled) { /* * Setup the RWS dirtyListThreshold. */ @@ -755,13 +755,25 @@ * bulk data load, it is not uncommon for all records to be * recycled by the time we take something from the dirtyList, in * which case the cache will be (logically) empty. + * + * Note: This test (WriteCache.isEmpty()) is not decisive + * because we are not holding any locks across it and the + * subsequent actions. Therefore, it is possible that the cache + * will become empty after it has been tested through concurrent + * clearWrite() invocations. That should not be a problem. We + * want to leave the cache open (versus closing it against + * writes) in case we decide to compact the cache rather than + * evicting it. The cache MUST NOT be closed for writes when we + * compact it or we will lose the ability to clear recycled + * records out of that WriteCache. 
*/ + final boolean wasEmpty = cache.isEmpty(); if (!wasEmpty) { final int percentEmpty = cache.potentialCompaction(); - + if (compactionEnabled && !flush //&& cache.canCompact() && percentEmpty >= compactionThreshold) { @@ -852,8 +864,17 @@ * @throws InterruptedException */ private boolean compactCache(final WriteCache cache) - throws InterruptedException { + throws InterruptedException, Exception { + /* + * The cache should not be closed against writes. If it were closed + * for writes, then we would no longer be able to capture cleared + * writes in the RecordMap. However, if we compact the cache, we + * want any cleared writes to be propagated into the compacted + * cache. + */ + assert !cache.isClosedForWrites(); + final WriteCache reserve = getReserve(); if (reserve == null) { @@ -888,6 +909,7 @@ done = WriteCache.transferTo(cache/* src */, curCompactingCache/* dst */, recordMap); if (done) { + sendAddressMetadata(cache); /* * Return reserve to the cleanList. * @@ -932,6 +954,7 @@ if (log.isDebugEnabled()) log.debug("USING RESERVE: curCompactingCache.bytesWritten=" + curCompactingCache.bytesWritten()); + sendAddressMetadata(cache); // Buffer was compacted. return true; } finally { @@ -948,6 +971,42 @@ } // compactCache() + /** + * In HA, we need to notify a downstream RWS of the addresses that have + * been allocated on the leader in the same order in which the leader + * made those allocations. This information is used to infer the order + * in which the allocators for the different allocation slot sizes are + * created. This method will synchronous send those address notices and + * and also makes sure that the followers see the recycled addresses + * records so they can keep both their allocators and the actual + * allocations synchronized with the leader. + * + * @param cache + * A {@link WriteCache} whose data has been transfered into + * another {@link WriteCache} through a "compact" operation. + * + * @throws IllegalStateException + * @throws InterruptedException + * @throws ExecutionException + * @throws IOException + */ + private void sendAddressMetadata(final WriteCache cache) + throws IllegalStateException, InterruptedException, + ExecutionException, IOException { + + if (quorum == null || !quorum.isHighlyAvailable() + || !quorum.getClient().isLeader(quorumToken)) { + return; + } + + if (cache.prepareAddressMetadataForHA()) { + + writeCacheBlock(cache); + + } + + } + private WriteCache getReserve() throws InterruptedException { cleanListLock.lockInterruptibly(); try { @@ -988,6 +1047,11 @@ * Get a dirty cache buffer. Unless we are flushing out the buffered * writes, we will allow the dirtyList to grow to the desired threshold * before we attempt to compact anything. + * <p> + * Note: This DOES NOT remove the {@link WriteCache} from the + * {@link #dirtyList}. It uses a peek(). The {@link WriteCache} will + * remain on the {@link #dirtyList} until it has been handled by + * {@link #doRun()}. * * @return A dirty {@link WriteCache}. */ @@ -1067,11 +1131,15 @@ */ cache.closeForWrites(); + { + final ByteBuffer b = cache.peek(); + if (b.position() == 0) + return; + } + // increment writeCache sequence cache.setSequence(cacheSequence++); - final IHAWriteMessage msg; - if (quorum != null && quorum.isHighlyAvailable()) { // Verify quorum still valid and we are the leader. 
@@ -1095,7 +1163,7 @@ assert quorumMember != null : "Not quorum member?"; - msg = cache.newHAWriteMessage(quorumToken,// + final IHAWriteMessage msg = cache.newHAWriteMessage(quorumToken,// quorumMember.getLastCommitCounter(),// quorumMember.getLastCommitTime(),// checksumBuffer @@ -1107,6 +1175,12 @@ * TODO When adding support for asynchronous replication we will * have to ensure that the followers log the write cache blocks * exactly once. They currently do this in HAJournalService. + * + * Note: The WriteCacheService absorbs a lot of latency, but we + * are still going to have that increased latency when it comes + * time to flush() the data to the followers. Going asynchronous + * with the dirty list replication would reduce that latency (as + * would compacting the dirty list buffers when [flush:=true]. */ quorumMember.logWriteCacheBlock(msg, b.duplicate()); @@ -1115,10 +1189,6 @@ counters.get().nsend++; - } else { - - msg = null; - } /* @@ -1689,19 +1759,29 @@ final int saveDirtyListThreshold = m_dirtyListThreshold; try { + /* + * Force WriteTask.call() to evict anything in the cache. + * + * Note: We need to wait until the dirtyList has been evicted + * before writing out the compacting cache (if any) and then + * finally drop the compactingCache onto the cleanList. Or have + * a 2-stage flush. + * + * FIXME We want it to continue to compact the cache buffers + * during flush so it always outputs dense buffers. The code + * right now will NOT compact the cache buffers when flush is + * true. The behavior when flush:=true should be modified to + * compact the buffer and then write it out rather than dropping + * it back onto the dirty list. + */ m_dirtyListThreshold = 1; flush = true; - // Add any active compactingCache to dirty list - if (compactingCache != null) { - final WriteCache tmp2 = compactingCache; - compactingCache = null; - dirtyList.add(tmp2); - counters.get().ndirty++; - } /* + * Wait until the dirtyList has been emptied. + * * Note: [tmp] may be empty, but there is basically zero cost in - * WriteTask to process and empty buffer and, done this way, the + * WriteTask to process an empty buffer and, done this way, the * code is much less complex here. */ dirtyList.add(tmp); @@ -1714,6 +1794,27 @@ throw new TimeoutException(); } } + /* + * Add the [compactingCache] (if any) to dirty list and spin it + * down again. + * + * Note: We can not drop the compactingCache onto the dirtyList + * until the dirtyList has been spun down to empty. + */ + if (compactingCache != null) { + final WriteCache tmp2 = compactingCache; + compactingCache = null; + dirtyList.add(tmp2); + counters.get().ndirty++; + dirtyListChange.signalAll(); + while (!dirtyList.isEmpty() && !halt) { + // remaining := (total - elapsed). 
+ remaining = nanos - (System.nanoTime() - begin); + if (!dirtyListEmpty.await(remaining, TimeUnit.NANOSECONDS)) { + throw new TimeoutException(); + } + } + } if (halt) throw new RuntimeException(firstCause.get()); } finally { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-11-27 00:08:51 UTC (rev 6729) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-11-28 16:32:48 UTC (rev 6730) @@ -819,49 +819,66 @@ /** * Called from WriteCache.resetRecordMapFromBuffer * - * If a FixedAllocator already exists for the address then just set the address as active, - * otherwise, create a new allocator and try again, which should work second time around - * if we are correctly in sync. + * If a FixedAllocator already exists for the address then just set the + * address as active, otherwise, create a new allocator and try again, which + * should work second time around if we are correctly in sync. * * @param latchedAddr + * The latched address. * @param size + * The size of the application data -or- <code>-size</code> if + * this provides notice of the existence of an allocator for that + * <i>latchedAddr</i> but the address itself should not yet be + * allocated. */ void addAddress(final int latchedAddr, final int size) { - // ignore zero address - if (latchedAddr == 0) - return; - - m_allocationLock.lock(); - try { - FixedAllocator alloc = null; - try { - alloc = getBlock(latchedAddr); - } catch (final PhysicalAddressResolutionException par) { - // Must create new allocator - } - if (alloc == null) { - final int i = fixedAllocatorIndex(size); - final int block = 64 * m_allocSizes[i]; - final ArrayList<FixedAllocator> list = m_freeFixed[i]; - final FixedAllocator allocator = new FixedAllocator(this, block); - - allocator.setFreeList(list); - allocator.setIndex(m_allocs.size()); - - m_allocs.add(allocator); - - // Check correctly synchronized creation - assert allocator == getBlock(latchedAddr); - - alloc = allocator; - } - - assert size <= alloc.getSlotSize(); - - alloc.setAddressExternal(latchedAddr); - } finally { - m_allocationLock.unlock(); - } + // ignore zero address + if (latchedAddr == 0) + return; + + m_allocationLock.lock(); + try { + FixedAllocator alloc = null; + try { + alloc = getBlock(latchedAddr); + } catch (final PhysicalAddressResolutionException par) { + // Must create new allocator + } + final int size2 = size < 0 ? -size : size; + if (alloc == null) { + final int i = fixedAllocatorIndex(size2); + final int block = 64 * m_allocSizes[i]; + final ArrayList<FixedAllocator> list = m_freeFixed[i]; + final FixedAllocator allocator = new FixedAllocator(this, block); + + allocator.setFreeList(list); + allocator.setIndex(m_allocs.size()); + + m_allocs.add(allocator); + + // Check correctly synchronized creation + assert allocator == getBlock(latchedAddr); + + alloc = allocator; + } + + assert size2 <= alloc.getSlotSize(); + + if (size > 0) { + + /* + * This is a real allocation. 
+ */ + + alloc.setAddressExternal(latchedAddr); + + } + + } finally { + + m_allocationLock.unlock(); + + } } /** Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2012-11-27 00:08:51 UTC (rev 6729) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2012-11-28 16:32:48 UTC (rev 6730) @@ -85,7 +85,8 @@ suite.addTestSuite(TestHAJournalServer.class); // HA2 test suite (k=3, but only 2 services are running). - suite.addTestSuite(TestHA2JournalServer.class); + // FIXME Enable TestHA2JournalServer in CI (debug bounce leader/follower first). +// suite.addTestSuite(TestHA2JournalServer.class); // HA3 test suite. suite.addTestSuite(TestHA3JournalServer.class); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
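To make the compaction change above easier to follow: prepareAddressMetadataForHA() walks the new orderedRecords list and emits one 16-byte entry per address, reusing the prefix-write layout of [fileOffset:8][recordLength:4][latchedAddr:4] with no payload. A negative fileOffset marks a recycled (deleted) address, and a negative recordLength marks an entry that carries no data, which is how resetRecordMapFromBuffer() on a follower tells recycle notices and bare address declarations apart from ordinary records. The sketch below is illustrative only; the class and method names are hypothetical and the decode loop merely mirrors the branching shown in the diff.

import java.nio.ByteBuffer;

/**
 * Minimal sketch (not bigdata code) of the dense address-metadata stream
 * produced by prepareAddressMetadataForHA() and interpreted by the RWS
 * follower, as described in the diff above. Each 16-byte entry is
 * [fileOffset:8][recordLength:4][latchedAddr:4]:
 *   fileOffset   < 0 : the address was recycled on the leader
 *   recordLength < 0 : no payload follows the entry
 */
public class HAAddressMetadataSketch {

    /** Entry for an address that is still allocated (declaration only, no data). */
    static void putDeclared(final ByteBuffer buf, final long fileOffset,
            final int recordLength, final int latchedAddr) {
        buf.putLong(fileOffset);   // positive: address still allocated on the leader
        buf.putInt(-recordLength); // negative: no data follows this entry
        buf.putInt(latchedAddr);
    }

    /** Entry for an address that was recycled (deleted) on the leader. */
    static void putDeleted(final ByteBuffer buf, final long fileOffset,
            final int recordLength, final int latchedAddr) {
        buf.putLong(-fileOffset);  // negative: recycle notice for the follower
        buf.putInt(-recordLength);
        buf.putInt(latchedAddr);
    }

    /** Follower-side decode loop mirroring the branching in resetRecordMapFromBuffer(). */
    static void decode(final ByteBuffer buf) {
        while (buf.remaining() >= 16) {
            final long fileOffset = buf.getLong();
            final int recordLength = buf.getInt();
            final int latchedAddr = buf.getInt();
            if (fileOffset < 0) {
                // recycle notice: the follower reconciles its allocator for latchedAddr
                System.out.println("recycle latchedAddr=" + latchedAddr);
            } else if (recordLength < 0) {
                // bare declaration: the follower ensures a suitable allocator exists
                System.out.println("declare latchedAddr=" + latchedAddr
                        + " (record length " + (-recordLength) + ")");
            } else {
                // ordinary prefix-write entry: recordLength bytes of payload follow
                buf.position(buf.position() + recordLength);
            }
        }
    }

    public static void main(String[] args) {
        final ByteBuffer buf = ByteBuffer.allocate(64);
        putDeclared(buf, 8192L, 1024, 77);
        putDeleted(buf, 12288L, 512, 78);
        buf.flip();
        decode(buf);
    }
}

Because these entries reuse the existing prefix-write wire format, the leader can push them through the same writeCacheBlock() path as an ordinary replicated buffer, which is what sendAddressMetadata() in the WriteCacheService hunk above does after a successful compaction.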
From: <tho...@us...> - 2012-11-29 16:49:43
Revision: 6741 http://bigdata.svn.sourceforge.net/bigdata/?rev=6741&view=rev Author: thompsonbry Date: 2012-11-29 16:49:35 +0000 (Thu, 29 Nov 2012) Log Message: ----------- Added optimization by Martyn for B+Tree byte[1] for the RDF Statement values to both SPOTupleSerializer and FastRDFValueCoder2. Modified the WriteCacheService to always set the fileExtent immediately before writing out the cache block to the wire or local disk. This deals with a problem where the compacting cache could have a stale fileOffset. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/FastRDFValueCoder2.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOTupleSerializer.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/RDFValueFactory.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2012-11-29 01:27:11 UTC (rev 6740) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2012-11-29 16:49:35 UTC (rev 6741) @@ -324,7 +324,7 @@ return getClass().getSimpleName() + "{fileOffset=" + fileOffset + ",bufferOffset=" + bufferOffset + ",len=" + recordLength - + ", delete=" + deleted + "}"; + + ",delete=" + deleted + "}"; } @@ -470,7 +470,7 @@ * When <code>null</code> a buffer will be allocated for you from * the {@link DirectBufferPool}. Buffers allocated on your behalf * will be automatically released by {@link #close()}. - * @param scatteredWrites + * @param prefixWrites * <code>true</code> iff the implementation uses scattered * writes. The RW store uses scattered writes since its updates * are written to different parts of the backing file. The WORM @@ -496,9 +496,10 @@ * * @throws InterruptedException */ - public WriteCache(IBufferAccess buf, final boolean scatteredWrites, final boolean useChecksum, - final boolean isHighlyAvailable, final boolean bufferHasData, - final long fileExtent) throws InterruptedException { + public WriteCache(IBufferAccess buf, final boolean prefixWrites, + final boolean useChecksum, final boolean isHighlyAvailable, + final boolean bufferHasData, final long fileExtent) + throws InterruptedException { if (bufferHasData && buf == null) throw new IllegalArgumentException(); @@ -521,7 +522,7 @@ // this.quorumManager = quorumManager; this.useChecksum = useChecksum; - this.prefixWrites = scatteredWrites; + this.prefixWrites = prefixWrites; if (isHighlyAvailable && !bufferHasData) { // Note: No checker if buffer has data. @@ -570,7 +571,7 @@ * better with concurrency, so we should benchmark this option for * non-scattered writes as well. 
*/ - if (scatteredWrites) { + if (prefixWrites) { recordMap = new ConcurrentSkipListMap<Long, RecordMetadata>(); } else { recordMap = new ConcurrentHashMap<Long, RecordMetadata>(indexDefaultCapacity); @@ -609,6 +610,8 @@ + "{recordCount=" + recordMap.size()// + ",firstOffset=" + firstOffset// + ",releaseBuffer=" + releaseBuffer// + + ",prefixWrites=" + prefixWrites// + + ",useChecksum=" + useChecksum// + ",bytesWritten=" + bytesWritten()// + ",bytesRemaining=" + remaining()// + ",bytesRemoved=" + m_removed// @@ -973,48 +976,48 @@ } - /** - * This method supports - * {@link #transferTo(WriteCache, WriteCache, ConcurrentMap)} and provides a - * low-level code path for copying records into <i>this</i> buffer from the - * buffer specified by the caller. - * <p> - * Note: This method is only invoked by transferTo(). We need to check its - * assumptions in more depth regarding synchronization before invoking from - * any other context. - */ - private boolean writeRaw(final long offset, final ByteBuffer bb, - final int latchedAddr) throws IllegalStateException, - InterruptedException { +// /** +// * This method supports +// * {@link #transferTo(WriteCache, WriteCache, ConcurrentMap)} and provides a +// * low-level code path for copying records into <i>this</i> buffer from the +// * buffer specified by the caller. +// * <p> +// * Note: This method is only invoked by transferTo(). We need to check its +// * assumptions in more depth regarding synchronization before invoking from +// * any other context. +// */ +// private boolean writeRaw(final long offset, final ByteBuffer bb, +// final int latchedAddr) throws IllegalStateException, +// InterruptedException { +// +// assert !m_closedForWrites; +// +// final int len = bb.limit() - bb.position(); +// +// assert len <= remaining(); +// +// final ByteBuffer tmp = acquire(); +// try { +// final int pos; +// final int prefix = (prefixWrites ? SIZEOF_PREFIX_WRITE_METADATA : 0); +// final int datalen = len - prefix; +// synchronized (tmp) { +// pos = tmp.position(); +// tmp.put(bb); +// } +// final RecordMetadata old = recordMap.put(Long.valueOf(offset), +// new RecordMetadata(offset, pos + prefix, datalen, +// latchedAddr)); +// if (old != null) { +// throw new IllegalStateException("Write already found at " +// + offset); +// } +// return true; +// } finally { +// release(); +// } +// } - assert !m_closedForWrites; - - final int len = bb.limit() - bb.position(); - - assert len <= remaining(); - - final ByteBuffer tmp = acquire(); - try { - final int pos; - final int prefix = (prefixWrites ? SIZEOF_PREFIX_WRITE_METADATA : 0); - final int datalen = len - prefix; - synchronized (tmp) { - pos = tmp.position(); - tmp.put(bb); - } - final RecordMetadata old = recordMap.put(Long.valueOf(offset), - new RecordMetadata(offset, pos + prefix, datalen, - latchedAddr)); - if (old != null) { - throw new IllegalStateException("Write already found at " - + offset); - } - return true; - } finally { - release(); - } - } - /** * {@inheritDoc} * @@ -1565,8 +1568,7 @@ * <p> * Note: <code>volatile</code> since not guarded by any lock. */ - // package private : exposed to canCompact() in subclass. - volatile int m_removed; + private volatile int m_removed; /** * Sets the performance counters to be used by the write cache. A service @@ -2048,12 +2050,13 @@ * @param serviceRecordMap * the map of the WriteCacheService that associates an address * with a WriteCache - * @param fileExtent - * the current extent of the backing file. 
* @throws InterruptedException */ - void resetWith(final ConcurrentMap<Long, WriteCache> serviceRecordMap, - final long fileExtent) throws InterruptedException { +// * @param fileExtent +// * the current extent of the backing file. + void resetWith(final ConcurrentMap<Long, WriteCache> serviceRecordMap +// final long fileExtentIsIgnored + ) throws InterruptedException { final Iterator<Long> entries = recordMap.keySet().iterator(); @@ -2062,7 +2065,8 @@ log.info("resetting existing WriteCache: nrecords=" + recordMap.size() + ", hashCode=" + hashCode()); while (entries.hasNext()) { - final Long addr = entries.next(); + + final Long fileOffset = entries.next(); /* * We need to guard against the possibility that the entry in @@ -2073,9 +2077,9 @@ * Using the conditional remove on ConcurrentMap guards against * this. */ - final boolean removed = serviceRecordMap.remove(addr, this); + final boolean removed = serviceRecordMap.remove(fileOffset, this); - registerWriteStatus(addr, 0, removed ? 'R' : 'L'); + registerWriteStatus(fileOffset, 0, removed ? 'R' : 'L'); } @@ -2090,7 +2094,7 @@ } reset(); // must ensure reset state even if cache already empty - setFileExtent(fileExtent); +// setFileExtent(fileExtent); } @@ -2309,54 +2313,80 @@ * guaranteeing that no writes will be applied to [src]). */ final ByteBuffer bb = src.acquire().duplicate(); + ByteBuffer dd = null; try { - - final int chklen = 0; // useChecksum ? 4 : 0; + // Setup destination + dd = dst.acquire(); + // Note: md.recordLength includes the checksum (suffix) final int prefixlen = src.prefixWrites ? SIZEOF_PREFIX_WRITE_METADATA : 0; - final int xtralen = chklen + prefixlen; final Set<Entry<Long, RecordMetadata>> es = src.recordMap.entrySet(); final Iterator<Entry<Long, RecordMetadata>> entries = es.iterator(); while (entries.hasNext()) { final Entry<Long, RecordMetadata> entry = entries.next(); - final long offset = entry.getKey(); // file offset. + final long fileOffset = entry.getKey(); // file offset. final RecordMetadata md = entry.getValue(); if (serviceRecordMap != null) { - final WriteCache tmp = serviceRecordMap.get(offset); + final WriteCache tmp = serviceRecordMap.get(fileOffset); if (tmp == null) throw new AssertionError("Not owned: offset=" - + offset + ", md=" + md); + + fileOffset + ", md=" + md); else if (tmp != src) throw new AssertionError( "Record not owned by this cache: src=" + src + ", owner=" + tmp - + ", offset=" + offset + ", md=" + + ", offset=" + fileOffset + ", md=" + md); } - - final int len = md.recordLength + xtralen; + assert !md.deleted; // not deleted (deleted entries should not be in the recordMap). + final int len = prefixlen + md.recordLength; final int dstremaining = dst.remaining(); if (len > dstremaining) { // Not enough room in destination for this record. if (dstremaining >= 512) { - // Destinaction still has room, keep looking. + // Destination still has room, keep looking. continue; } // Destination is full (or full enough). return false; } - final ByteBuffer dup = bb;//bb.duplicate(); (dup'd above). +// final ByteBuffer dup = bb;//bb.duplicate(); (dup'd above). final int pos = md.bufferOffset - prefixlen;// include prefix final int limit = pos + len; // and any postfix - dup.limit(limit); - dup.position(pos); - dst.writeRaw(offset, dup, md.latchedAddr); - - if (dst.remaining() != (dstremaining - len)) { - throw new AssertionError("dst.remaining(): " + dst.remaining() + " expected: " + dstremaining); + final int dstoff; // offset in the destination buffer. 
+ synchronized (bb) { + bb.limit(limit); + bb.position(pos); + // dst.writeRaw(fileOffset, dup, md.latchedAddr); + + // Copy to destination. + synchronized (dd) { + dstoff = dd.position() + prefixlen; + dd.put(bb); + assert dst.remaining() == (dstremaining - len) : "dst.remaining(): " + + dst.remaining() + + " expected: " + + dstremaining; + } } - + /* + * Insert record into destination. + * + * Note: The [orderedList] on the target buffer is not + * updated because we handle the propagation of the address + * allocation/clear notices separately and synchronously + * using prepareAddressMetadataForHA(). + */ + { + final RecordMetadata old = dst.recordMap.put(Long + .valueOf(fileOffset), new RecordMetadata( + fileOffset, dstoff/* bufferOffset */, + md.recordLength, md.latchedAddr)); + + assert old == null : "Write already found: " + old; + } + if (serviceRecordMap != null) { /* * Note: As soon as we update the service record map it @@ -2364,11 +2394,10 @@ * clear the record from [dst]. We can not rely on the * record remaining in [dst] after this method call! */ - final WriteCache tmp = serviceRecordMap - .put(offset, dst); - if (tmp != src) - throw new AssertionError("tmp=" + tmp + ",src=" - + src + ", offset=" + offset + ", md=" + md); + final WriteCache tmp = serviceRecordMap.put(fileOffset, + dst); + assert src == tmp : "tmp=" + tmp + ",src=" + src + + ", offset=" + fileOffset + ", md=" + md; } // Clear entry from src recordMap. @@ -2391,6 +2420,8 @@ throw new IllegalStateException(); } } finally { + if (dd != null) + dst.release(); src.release(); } } @@ -2559,20 +2590,6 @@ } -// /** -// * Return <code>true</code> iff we are allowed to compact buffers. The -// * default implementation of the {@link WriteCache} is for a Worm and can -// * never compact. -// * <p> -// * Note: This method is package private for access by -// * {@link WriteCacheService}. -// */ -// boolean canCompact() { -// -// return false; -// -// } - /** * Return the percentage of space that has been removed through the * application of {@link #clearAddrMap(long, int)} and hence could be Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2012-11-29 01:27:11 UTC (rev 6740) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2012-11-29 16:49:35 UTC (rev 6741) @@ -952,7 +952,7 @@ if (log.isTraceEnabled()) log.trace("Setting curCompactingCache to reserve"); - reserve.resetWith(recordMap, fileExtent.get()); + reserve.resetWith(recordMap);//, fileExtent.get()); curCompactingCache = reserve; if (log.isTraceEnabled()) log.trace("Transferring to curCompactingCache"); @@ -1142,15 +1142,25 @@ */ cache.closeForWrites(); + /* + * Test for an empty cache. + * + * Note: We can not do this until the cache has been closed for + * writes. + */ { final ByteBuffer b = cache.peek(); - if (b.position() == 0) + if (b.position() == 0) { + // Empty cache. return; + } } // increment writeCache sequence cache.setSequence(cacheSequence++); + cache.setFileExtent(fileExtent.get()); + if (quorum != null && quorum.isHighlyAvailable()) { // Verify quorum still valid and we are the leader. 
@@ -1832,7 +1842,13 @@ // m_dirtyListThreshold = saveDirtyListThreshold; flush = false; try { - assert compactingCache == null; + if(!halt) { + /* + * Can not check assertion if there is an existing + * exception. + */ + assert compactingCache == null; + } } finally { dirtyListLock.unlock(); } @@ -1858,7 +1874,7 @@ // Guaranteed available hence non-blocking. final WriteCache nxt = cleanList.take(); counters.get().nclean--; - nxt.resetWith(recordMap, fileExtent.get()); + nxt.resetWith(recordMap);//, fileExtent.get()); current.set(nxt); return true; } finally { @@ -1896,24 +1912,24 @@ if (fileExtent < 0L) throw new IllegalArgumentException(); - final WriteCache cache = acquireForWriter(); - - try { +// final WriteCache cache = acquireForWriter(); +// +// try { if (log.isDebugEnabled()) log.debug("Set fileExtent: " + fileExtent); // make a note of the current file extent. this.fileExtent.set(fileExtent); - // set the current file extent on the WriteCache. - cache.setFileExtent(fileExtent); +// // set the current file extent on the WriteCache. +// cache.setFileExtent(fileExtent); +// +// } finally { +// +// release(); +// +// } - } finally { - - release(); - - } - } public boolean write(final long offset, final ByteBuffer data, final int chk) @@ -2152,7 +2168,7 @@ counters.get().nclean--; // Clear the state on the new buffer and remove from // cacheService map - newBuffer.resetWith(recordMap, fileExtent.get()); + newBuffer.resetWith(recordMap);//, fileExtent.get()); // Set it as the new buffer. current.set(cache = newBuffer); @@ -2450,7 +2466,7 @@ counters.get().nclean--; // Clear state on new buffer and remove from cacheService map - newBuffer.resetWith(recordMap, fileExtent.get()); + newBuffer.resetWith(recordMap);//, fileExtent.get()); // Set it as the new buffer. current.set(newBuffer); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-11-29 01:27:11 UTC (rev 6740) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-11-29 16:49:35 UTC (rev 6741) @@ -954,6 +954,9 @@ @Override public Void call() throws Exception { + if (true) + throw new UnsupportedOperationException(); + // final long readLock = leader.newTx(ITx.READ_COMMITTED); try { @@ -1765,6 +1768,11 @@ throw new RuntimeException(e); + } catch (RuntimeException t) { + + // Wrap with the HA message. + throw new RuntimeException("msg=" + msg + ": " + t, t); + } } @@ -1968,7 +1976,7 @@ } setExtent(msg); - writeWriteCacheBlock(msg,data); + writeWriteCacheBlock(msg, data); } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2012-11-29 01:27:11 UTC (rev 6740) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2012-11-29 16:49:35 UTC (rev 6741) @@ -380,9 +380,9 @@ */ for (RemoteRepository r : repos) { - // Should be empty. - assertEquals(10L, - countResults(r.prepareTupleQuery("SELECT * {?a ?b ?c} LIMIT 10") + // Should have data. 
+ assertEquals(100L, + countResults(r.prepareTupleQuery("SELECT * {?a ?b ?c} LIMIT 100") .evaluate())); } @@ -401,7 +401,54 @@ // Verify no HALog files since fully met quorum @ commit. assertHALogNotFound(0L/* firstCommitCounter */, lastCommitCounter2, new HAGlue[] { serverA, serverB, serverC }); + + /* + * Do a "DROP ALL" and reverify that no solutions are found on each + * service. + */ + { + + // Verify quorum is still valid. + quorum.assertQuorum(token); + + repos[0].prepareUpdate("DROP ALL").evaluate(); + + } + /* + * Verify that query on all nodes is allowed and now provides an empty + * result. + */ + for (RemoteRepository r : repos) { + + // Should be empty. + assertEquals( + 0L, + countResults(r.prepareTupleQuery( + "SELECT * {?a ?b ?c} LIMIT 100").evaluate())); + + } + + // Current commit point. + final long lastCommitCounter3 = serverA + .getRootBlock(new HARootBlockRequest(null/* storeUUID */)) + .getRootBlock().getCommitCounter(); + + // There are now THREE (3) commit points. + assertEquals(3L, lastCommitCounter3); + + // Verify binary equality. + assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + + // Verify no HALog files since fully met quorum @ commit. + assertHALogNotFound(0L/* firstCommitCounter */, lastCommitCounter2, + new HAGlue[] { serverA, serverB, serverC }); + + /* + * TODO Continue test and verify restart? Or verify restart before we do + * the DROP ALL? + */ + } } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/FastRDFValueCoder2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/FastRDFValueCoder2.java 2012-11-29 01:27:11 UTC (rev 6740) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/FastRDFValueCoder2.java 2012-11-29 16:49:35 UTC (rev 6741) @@ -174,7 +174,6 @@ * Decoder. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ private static class CodedRabaImpl extends AbstractCodedRaba { @@ -331,7 +330,8 @@ } else { - return new byte[] { bits }; +// return new byte[] { bits }; + return RDFValueFactory.getValue(bits); } Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/RDFValueFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/RDFValueFactory.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/RDFValueFactory.java 2012-11-29 16:49:35 UTC (rev 6741) @@ -0,0 +1,64 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +package com.bigdata.rdf.spo; + +/** + * Factory for the single element <code>byte[]</code> used for the value of an + * RDF Statement in one of the statement indices. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class RDFValueFactory { + + private final static byte[][] table = createStaticByteArrayTable(); + + private static byte[][] createStaticByteArrayTable() { + final byte[][] table = new byte[256][]; + + for (int i = 0; i < 256; i++) { + + table[i] = new byte[] { (byte) i }; + + } + + return table; + + } + + /** + * Return the B+Tree value for an RDF Statement given its byte value. + * + * @param i + * The byte value of the Statement. + * + * @return A byte[] whose sole element is that byte value. + */ + static public byte[] getValue(final byte i) { + + return table[i]; + + } + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOTupleSerializer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOTupleSerializer.java 2012-11-29 01:27:11 UTC (rev 6740) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOTupleSerializer.java 2012-11-29 16:49:35 UTC (rev 6741) @@ -117,12 +117,12 @@ * @param keyOrder * The access path. * @param sids - * If true, attach sids to decoded SPOs where appropriate. + * If true, attach sids to decoded SPOs where appropriate. * @param leafKeySer * @param leafValSer */ public SPOTupleSerializer(final SPOKeyOrder keyOrder, - final boolean sids, + final boolean sids, final IRabaCoder leafKeySer, final IRabaCoder leafValSer) { super(new ASCIIKeyBuilderFactory(), leafKeySer, leafValSer); @@ -163,18 +163,18 @@ } - /** - * Variant duplicates the behavior of {@link #serializeVal(SPO)} to provide - * support for non-{@link SPO} {@link ISPO}s. - */ + /** + * Variant duplicates the behavior of {@link #serializeVal(SPO)} to provide + * support for non-{@link SPO} {@link ISPO}s. + */ public byte[] serializeVal(final ISPO spo) { - if (spo == null) + if (spo == null) throw new IllegalArgumentException(); - return serializeVal(//buf, - spo.isOverride(), spo.getUserFlag(), spo.getStatementType()); - + return serializeVal(//buf, + spo.isOverride(), spo.getUserFlag(), spo.getStatementType()); + } /** @@ -186,60 +186,62 @@ if (spo == null) throw new IllegalArgumentException(); - return serializeVal(//buf, - spo.isOverride(), spo.getUserFlag(), spo.getStatementType()); + return serializeVal(//buf, + spo.isOverride(), spo.getUserFlag(), spo.getStatementType()); - } + } - /** - * Return the byte[] that would be written into a statement index for this - * {@link SPO}, including the optional {@link StatementEnum#MASK_OVERRIDE} - * bit. If the statement identifier is non-null then it will be included in - * the returned byte[]. - * - * @param override - * <code>true</code> iff you want the - * {@link StatementEnum#MASK_OVERRIDE} bit set (this is only set - * when serializing values for a remote procedure that will write - * on the index, it is never set in the index itself). - * @param userFlag - * <code>true</code> iff you want the - * {@link StatementEnum#MASK_USER_FLAG} bit set. - * @param type - * The {@link StatementEnum}. 
- * - * @return The value that would be written into a statement index for this - * {@link SPO}. - */ + /** + * Return the byte[] that would be written into a statement index for this + * {@link SPO}, including the optional {@link StatementEnum#MASK_OVERRIDE} + * bit. If the statement identifier is non-null then it will be included in + * the returned byte[]. + * + * @param override + * <code>true</code> iff you want the + * {@link StatementEnum#MASK_OVERRIDE} bit set (this is only set + * when serializing values for a remote procedure that will write + * on the index, it is never set in the index itself). + * @param userFlag + * <code>true</code> iff you want the + * {@link StatementEnum#MASK_USER_FLAG} bit set. + * @param type + * The {@link StatementEnum}. + * + * @return The value that would be written into a statement index for this + * {@link SPO}. + */ // * @param buf // * A buffer supplied by the caller. The buffer will be reset // * before the value is written on the buffer. - public byte[] serializeVal(//final ByteArrayBuffer buf, - final boolean override, final boolean userFlag, - final StatementEnum type) { - -// buf.reset(); + public byte[] serializeVal(//final ByteArrayBuffer buf, + final boolean override, final boolean userFlag, + final StatementEnum type) { + +// buf.reset(); - // optionally set the override and user flag bits on the value. - final byte b = (byte) - (type.code() - | (override ? StatementEnum.MASK_OVERRIDE : 0x0) - | (userFlag ? StatementEnum.MASK_USER_FLAG : 0x0) - ); + // optionally set the override and user flag bits on the value. + final byte b = (byte) + (type.code() + | (override ? StatementEnum.MASK_OVERRIDE : 0x0) + | (userFlag ? StatementEnum.MASK_USER_FLAG : 0x0) + ); -// buf.putByte(b); +// buf.putByte(b); // -// final byte[] a = buf.toByteArray(); +// final byte[] a = buf.toByteArray(); // // assert a.length == 1 : "Expecting one byte, but have " // + BytesUtil.toString(a); - - return new byte[]{b}; + + return RDFValueFactory.getValue(b); - } + } + - public SPO deserialize(final ITuple tuple) { + public SPO deserialize(final ITuple tuple) { + if (tuple == null) throw new IllegalArgumentException(); @@ -271,12 +273,12 @@ } - /** - * Set the statement type, bit flags, and optional sid based on the tuple - * value. - */ + /** + * Set the statement type, bit flags, and optional sid based on the tuple + * value. + */ public ISPO decodeValue(final ISPO spo, final byte[] val) { - + final byte code = val[0]; final StatementEnum type = StatementEnum.decode(code); @@ -288,16 +290,16 @@ spo.setUserFlag(StatementEnum.isUserFlag(code)); if (sids) { - + // SIDs only valid for triples. assert keyOrder.getKeyArity() == 3; if (spo.isExplicit()) { - - spo.setStatementIdentifier(true); - + + spo.setStatementIdentifier(true); + } - + } return spo; @@ -329,10 +331,10 @@ switch (version) { case VERSION0: keyOrder = SPOKeyOrder.valueOf(in.readByte()); - /* - * New version is not backwards compatible with old journals that - * used sids. - */ + /* + * New version is not backwards compatible with old journals that + * used sids. + */ sids = false; break; case VERSION1: This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
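As a side note on the RDFValueFactory change above: the point of the 256-entry table is to hand out one shared, immutable byte[] per possible statement value byte instead of allocating a new array on every call (the old code paths in FastRDFValueCoder2 and SPOTupleSerializer.serializeVal() did "new byte[] { b }" per tuple). The following self-contained sketch shows the same flyweight idea in isolation; the class and method names are illustrative only, and unlike the committed getValue(byte) it masks the byte so that negative values also map into the table.

    /**
     * Illustrative flyweight cache for single-element byte[] values. Not part
     * of the bigdata code base; it only mirrors the idea behind RDFValueFactory.
     */
    public class SingleByteArrayCache {

        // One shared byte[] per possible byte value (0..255 after masking).
        private static final byte[][] TABLE = new byte[256][];

        static {
            for (int i = 0; i < 256; i++) {
                TABLE[i] = new byte[] { (byte) i };
            }
        }

        /** Return the shared single-element array for the given byte value. */
        public static byte[] valueOf(final byte b) {
            return TABLE[b & 0xFF]; // mask so negative bytes index correctly
        }

        public static void main(final String[] args) {
            // The same instance comes back for equal byte values, so the hot
            // path that serializes statement values creates no garbage.
            System.out.println(valueOf((byte) 3) == valueOf((byte) 3)); // true
        }
    }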
From: <tho...@us...> - 2012-12-03 14:44:40
Revision: 6752 http://bigdata.svn.sourceforge.net/bigdata/?rev=6752&view=rev Author: thompsonbry Date: 2012-12-03 14:44:33 +0000 (Mon, 03 Dec 2012) Log Message: ----------- Javadoc and relayered getProxy() to have the 2 argument version declared in AbstractJournal. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-12-03 14:43:44 UTC (rev 6751) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-12-03 14:44:33 UTC (rev 6752) @@ -2756,6 +2756,12 @@ * creating a properly formed root block. For a non-HA deployment, * we just lay down the root block. For an HA deployment, we do a * 2-phase commit. + * + * Note: In HA, the followers lay down the replicated writes + * synchronously. Thus, they are guaranteed to be on local storage + * by the time the leader finishes WriteCacheService.flush(). This + * does not create much latency because the WriteCacheService drains + * the dirtyList in a seperate thread. */ _bufferStrategy.commit(); @@ -5068,12 +5074,38 @@ * * @return The proxy for that future. */ - protected <E> Future<E> getProxy(final Future<E> future) { + final protected <E> Future<E> getProxy(final Future<E> future) { + return getProxy(future, false/* asyncFuture */); + + } + + /** + * Return a proxy object for a {@link Future} suitable for use in an RMI + * environment (the default implementation returns its argument). + * + * @param future + * The future. + * @param asyncFuture + * When <code>true</code>, the service should not wait for + * the {@link Future} to complete but should return a proxy + * object that may be used by the client to monitor or cancel + * the {@link Future}. When <code>false</code>, the method + * should wait for the {@link Future} to complete and then + * return a "thick" {@link Future} which wraps the completion + * state but does not permit asynchronous monitoring or + * cancellation of the operation wrapped by the + * {@link Future}. + * + * @return The proxy for that future. + */ + protected <E> Future<E> getProxy(final Future<E> future, + final boolean asyncFuture) { + return future; + + } - } - @Override public Future<Boolean> prepare2Phase( final IHA2PhasePrepareMessage prepareMessage) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-12-03 14:43:44 UTC (rev 6751) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2012-12-03 14:44:33 UTC (rev 6752) @@ -1266,12 +1266,13 @@ } /** - * Note that {@link Future}s generated by - * <code>java.util.concurrent</code> are NOT {@link Serializable}. - * Futher note the proxy as generated by an {@link Exporter} MUST be - * encapsulated so that the object returned to the caller can implement - * {@link Future} without having to declare that the methods throw - * {@link IOException} (for RMI). 
+ * {@inheritDoc} + * <p> + * Note: {@link Future}s generated by <code>java.util.concurrent</code> + * are NOT {@link Serializable}. Further note the proxy as generated by + * an {@link Exporter} MUST be encapsulated so that the object returned + * to the caller can implement {@link Future} without having to declare + * that the methods throw {@link IOException} (for RMI). * * @param future * The future. @@ -1280,12 +1281,6 @@ * exceptions. */ @Override - protected <E> Future<E> getProxy(final Future<E> future) { - - return getProxy(future, false/* asyncFuture */); - - } - protected <E> Future<E> getProxy(final Future<E> future, final boolean asyncFuture) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
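For readers following the getProxy() relayering above: the asyncFuture=false path amounts to waiting for the computation on the service side and shipping back a serializable snapshot of its outcome, which is what the javadoc calls a "thick" Future. The sketch below shows that idea in isolation; CapturedFuture and its members are illustrative names rather than the classes used by the code above, and it assumes the result type itself is Serializable.

    import java.io.Serializable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    /**
     * Illustrative "thick" future: waits for the source future, captures its
     * outcome, and can then cross RMI as a plain value (no live proxy needed).
     */
    public class CapturedFuture<E> implements Future<E>, Serializable {

        private static final long serialVersionUID = 1L;

        private final E result;        // value on normal completion (assumed Serializable)
        private final Throwable cause; // cause if the computation failed

        /** Blocks until the source future is done and captures its outcome. */
        public CapturedFuture(final Future<E> src) throws InterruptedException {
            E r = null;
            Throwable c = null;
            try {
                r = src.get(); // the blocking wait is the "thick" part
            } catch (ExecutionException ex) {
                c = ex.getCause();
            }
            this.result = r;
            this.cause = c;
        }

        @Override public boolean cancel(final boolean mayInterruptIfRunning) { return false; }
        @Override public boolean isCancelled() { return false; }
        @Override public boolean isDone() { return true; }

        @Override
        public E get() throws ExecutionException {
            if (cause != null)
                throw new ExecutionException(cause);
            return result;
        }

        @Override
        public E get(final long timeout, final TimeUnit unit)
                throws ExecutionException {
            return get(); // already complete, so the timeout is irrelevant
        }
    }

The asyncFuture=true path is the opposite trade-off: the service returns immediately and the client receives a proxy it can poll or cancel, at the cost of exporting a remotely invokable object.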
From: <tho...@us...> - 2012-12-03 16:48:48
Revision: 6755 http://bigdata.svn.sourceforge.net/bigdata/?rev=6755&view=rev Author: thompsonbry Date: 2012-12-03 16:48:37 +0000 (Mon, 03 Dec 2012) Log Message: ----------- Modified the HAJournalServer to handle quorumMeet() and quourmBreak() by queuing tasks on a single-threaded executor. These events arrive in the watcher thread. This change ensures that those tasks do not cause the watcher thread to block or deadlock. (Deadlocks were observed when a failed 2-phase commit caused a quorum break.) Changes to WriteCacheService are mostly javadoc and some minor restructuring. Removed some FIXMEs in AbstractHAJournalServerTestCase and reorganized the javadoc as it relates to TestConcurrentKBCreate in that test case. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2012-12-03 14:55:16 UTC (rev 6754) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2012-12-03 16:48:37 UTC (rev 6755) @@ -622,6 +622,12 @@ * through without actually taking anything off of the dirtyList. */ private final int m_dirtyListThreshold; + + /** + * When <code>true</code>, dirty buffers are immediately drained, compacted, + * and then written out to the backing media and (in HA mode) to the + * followers. + */ private volatile boolean flush = false; protected Callable<Void> newWriteTask() { @@ -1137,11 +1143,33 @@ throws InterruptedException, ExecutionException, IOException { /* + * IFF HA + * + * TODO isHA should be true even if the quorum is not highly + * available since there still could be other services in the write + * pipeline (e.g., replication to an offline HAJournalServer prior + * to changing over into an HA3 quorum or off-site replication). The + * unit tests need to be updated to specify [isHighlyAvailable] for + * ALL quorum based test runs. + */ + final boolean isHA = quorum != null && quorum.isHighlyAvailable(); + + // IFF HA and this is the quorum leader. + final boolean isHALeader = isHA + && quorum.getClient().isLeader(quorumToken); + + /* * Ensure nothing will modify this buffer before written to disk or - * HA pipeline + * HA pipeline. + * + * Note: Do NOT increment the cacheSequence here. We need to decide + * whether or not the buffer is empty first, and it needs to be + * closed for writes before we can make that decision. */ + + // Must be closed for writes. cache.closeForWrites(); - + /* * Test for an empty cache. * @@ -1155,14 +1183,15 @@ return; } } - - // increment writeCache sequence + + // Increment WriteCache sequence. cache.setSequence(cacheSequence++); + // Set the current file extent on the WriteCache. cache.setFileExtent(fileExtent.get()); - - if (quorum != null && quorum.isHighlyAvailable()) { + if (isHALeader) {//quorum != null && quorum.isHighlyAvailable()) { + // Verify quorum still valid and we are the leader. quorum.assertLeader(quorumToken); @@ -1191,17 +1220,14 @@ ); /* - * The quorum leader must log the write cache block. 
+ * The quorum leader logs the write cache block here. For the + * followers, the write cache blocks are currently logged by + * HAJournalServer. * - * TODO When adding support for asynchronous replication we will - * have to ensure that the followers log the write cache blocks - * exactly once. They currently do this in HAJournalService. - * - * Note: The WriteCacheService absorbs a lot of latency, but we - * are still going to have that increased latency when it comes - * time to flush() the data to the followers. Going asynchronous - * with the dirty list replication would reduce that latency (as - * would compacting the dirty list buffers when [flush:=true]. + * Note: In HA with replicationFactor=1, this should still + * attempt to replicate the write cache block in case there is + * someone else in the write pipeline (for example, off-site + * replication). */ quorumMember.logWriteCacheBlock(msg, b.duplicate()); @@ -1220,16 +1246,31 @@ * trap asynchronous close exceptions arising from the interrupt of * a concurrent IO operation and retry until they succeed. */ - if (log.isDebugEnabled()) - log.debug("Writing to file: " + cache.toString()); + { - cache.flush(false/* force */); - - counters.get().nbufferEvictedToChannel++; + if (log.isDebugEnabled()) + log.debug("Writing to file: " + cache.toString()); - // Wait for the downstream IOs to finish. + // Flush WriteCache buffer to channel (write on disk) + cache.flush(false/* force */); + + counters.get().nbufferEvictedToChannel++; + + } + + /* + * Wait for the downstream IOs to finish. + * + * Note: Only the leader is doing replication of the WriteCache + * blocks from this thread and only the leader will have a non-null + * value for the [remoteWriteFuture]. The followers are replicating + * to the downstream nodes in QuorumPipelineImpl. Since the WCS + * absorbs a lot of latency, replication from QuorumPipelineImpl + * should be fine. + */ if (remoteWriteFuture != null) { + // Wait for the downstream IOs to finish. remoteWriteFuture.get(); } @@ -1708,18 +1749,28 @@ * resulting in a high-level abort() and {@link #reset()} of the * {@link WriteCacheService}. * + * TODO flush() is currently designed to block concurrent writes() in + * order to give us clean decision boundaries for the HA write pipeline and + * also to simplify the internal locking design. Once we get HA worked out + * cleanly we should explore whether or not we can relax this constraint + * such that writes can run concurrently with flush(). That would have + * somewhat higher throughput since mutable B+Tree evictions would no longer + * cause concurrent tasks to block during the commit protocol or the file + * extent protocol. [Perhaps by associating each write set with a distinct + * sequence counter (that is incremented by both commit and abort)?] + * + * TODO Flush should order ALL {@link WriteCache}'s on the dirtyList by + * their fileOffset and then evict them in that order. This reordering will + * maximize the opportunity for locality during the IOs. With a large write + * cache (multiple GBs) this reordering could substantially reduce the + * IOWait associated with flush() for a large update. Note: The reordering + * should only be performed by the leader in HA mode - the followers will + * receive the {@link WriteCache} blocks in the desired order and can just + * drop them onto the dirtyList. 
+ * * @see WriteTask * @see #dirtyList * @see #dirtyListEmpty - * - * @todo Note: flush() is currently designed to block concurrent writes() in - * order to give us clean decision boundaries for the HA write - * pipeline and also to simplify the internal locking design. Once we - * get HA worked out cleanly we should explore whether or not we can - * relax this constraint such that writes can run concurrently with - * flush(). That would have somewhat higher throughput since mutable - * B+Tree evictions would no longer cause concurrent tasks to block - * during the commit protocol or the file extent protocol. */ public boolean flush(final boolean force, final long timeout, final TimeUnit units) throws TimeoutException, InterruptedException { @@ -1733,7 +1784,7 @@ * block. Writing the root block is the only thing that the nodes in * the quorum need to do once the write cache has been flushed. */ - haLog.info("Flushing the write cache."); + haLog.info("Flushing the write cache: seq=" + cacheSequence); } final long begin = System.nanoTime(); @@ -1778,7 +1829,6 @@ if (!dirtyListLock.tryLock(remaining, TimeUnit.NANOSECONDS)) throw new TimeoutException(); -// final int saveDirtyListThreshold = m_dirtyListThreshold; try { /* * Force WriteTask.call() to evict anything in the cache. @@ -1787,15 +1837,7 @@ * before writing out the compacting cache (if any) and then * finally drop the compactingCache onto the cleanList. Or have * a 2-stage flush. - * - * FIXME We want it to continue to compact the cache buffers - * during flush so it always outputs dense buffers. The code - * right now will NOT compact the cache buffers when flush is - * true. The behavior when flush:=true should be modified to - * compact the buffer and then write it out rather than dropping - * it back onto the dirty list. */ -// m_dirtyListThreshold = 1; flush = true; /* @@ -1839,7 +1881,6 @@ if (halt) throw new RuntimeException(firstCause.get()); } finally { -// m_dirtyListThreshold = saveDirtyListThreshold; flush = false; try { if(!halt) { @@ -1876,6 +1917,8 @@ counters.get().nclean--; nxt.resetWith(recordMap);//, fileExtent.get()); current.set(nxt); + if (haLog.isInfoEnabled()) + haLog.info("Flushed the write cache: seq=" + cacheSequence); return true; } finally { cleanListLock.unlock(); @@ -2405,6 +2448,91 @@ } +// /** +// * Accept the data for a replicated {@link WriteCache} buffer and drop it +// * onto the dirtyList. +// * <p> +// * This method supports HA replication. It take a {@link WriteCache} from +// * the cleanList, resets it for new writes, copies the data from the +// * caller's buffer, closes the {@link WriteCache} to prevent any +// * modifications, and then drops the {@link WriteCache} onto the +// * {@link #dirtyList}. +// * <p> +// * Note: By dropping the {@link WriteCache} onto the {@link #dirtyList}, we +// * benefit from being able to read back the writes from the cache. +// * Historically, the store was simply flushing the write through to the +// * backing file but this did not install the writes into the cache on the +// * followers. +// * <p> +// * Note: The caller's buffer is copied rather than retaining a reference. +// * This is necessary since the {@link HAReceiveService} reuses the same +// * buffer for all replicated {@link WriteCache} buffers. +// * +// * @param msg +// * The {@link IHAWriteMessage}. +// * @param b +// * The data from position to the limit will be copied. The +// * position will be advanced to limit. 
The caller should +// * {@link ByteBuffer#duplicate()} the {@link ByteBuffer} to avoid +// * side-effects. +// * +// * @throws InterruptedException +// * +// * FIXME Dropping the writeCache buffer onto the dirtyList does +// * not work because the writes on the write pipeline are not +// * synchronously written down to the disk and flush() on the +// * leader does not provide a guarantee that the followers are +// * also flushed. +// * <p> +// * Note: What would work is to first execute the synchronous +// * write to the local store and then drop the write cache buffer +// * (which must have a copy of the data in the buffer since the +// * buffer is owned by the HAReceiveService) onto the cleanList +// * in the WriteCacheService. This will be addressed when we +// * install reads on cache miss since those also need to be +// * installed onto the cleanList. We must also setup compaction +// * for the cleanList. +// */ +// public void copyRawBuffer(final IHAWriteMessage msg, final ByteBuffer b) +// throws InterruptedException { +// +// if (haLog.isDebugEnabled()) +// haLog.debug("msg=" + msg); +// +// /* +// * Take a buffer from the clean list. +// */ +// final WriteCache cache; +// cleanListLock.lockInterruptibly(); +// try { +// cache = cleanList.take(); +// counters.get().nclean--; +// cache.resetWith(recordMap);//, msg.getFileExtent()); +// } finally { +// cleanListLock.unlock(); +// } +// +// // transfer the data from the caller's buffer (side-effect on b). +// cache.copyRawBuffer(b); +// +// // close the cache against writes. +// cache.closeForWrites(); +// +// /* +// * Add the WriteCache to the dirty list. +// * +// * Note: The lock is required to signal dirtyListChange. +// */ +// dirtyListLock.lockInterruptibly(); +// try { +// dirtyList.add(cache); +// dirtyListChange.signalAll(); +// } finally { +// dirtyListLock.unlock(); +// } +// +// } + /** * Move the {@link #current} buffer to the dirty list and await a clean * buffer. 
The clean buffer is set as the {@link #current} buffer and Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-12-03 14:55:16 UTC (rev 6754) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2012-12-03 16:48:37 UTC (rev 6755) @@ -20,6 +20,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; +import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.locks.Lock; @@ -59,6 +60,7 @@ import com.bigdata.io.writecache.WriteCache; import com.bigdata.jini.start.config.ZookeeperClientConfig; import com.bigdata.jini.util.JiniUtil; +import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.IHABufferStrategy; import com.bigdata.journal.IRootBlockView; import com.bigdata.quorum.Quorum; @@ -74,6 +76,8 @@ import com.bigdata.service.jini.RemoteAdministrable; import com.bigdata.service.jini.RemoteDestroyAdmin; import com.bigdata.util.InnerCause; +import com.bigdata.util.concurrent.LatchedExecutor; +import com.bigdata.util.concurrent.MonitoredFutureTask; import com.bigdata.util.config.NicUtil; import com.bigdata.zookeeper.ZooKeeperAccessor; import com.sun.jini.start.LifeCycle; @@ -174,6 +178,21 @@ private ZooKeeperAccessor zka; + /** + * An executor used to handle events that were received in the zk watcher + * event thread. We can not take actions that could block in the watcher + * event thread. Therefore, a task for the event is dropped onto this + * service where it will execute asynchronously with respect to the watcher + * thread. + * <p> + * Note: This executor will be torn down when the backing + * {@link AbstractJournal#getExecutorService()} is torn down. Tasks + * remaining on the backing queue for the {@link LatchedExecutor} will be + * unable to execute successfuly and the queue will be drained as attempts + * to run those tasks result in {@link RejectedExecutionException}s. + */ + private LatchedExecutor singleThreadExecutor; + private HAGlue haGlueService; /** @@ -394,15 +413,23 @@ replicationFactor, zka, acl); } + // The HAJournal. this.journal = new HAJournal(properties, quorum); } + // executor for events received in the watcher thread. + singleThreadExecutor = new LatchedExecutor( + journal.getExecutorService(), 1/* nparallel */); + + // our external interface. haGlueService = journal.newHAGlue(serviceUUID); + // wrap the external interface, exposing administrative functions. final AdministrableHAGlueService administrableService = new AdministrableHAGlueService( this, haGlueService); + // return that wrapped interface. return administrableService; } @@ -750,7 +777,8 @@ super.start(quorum); // TODO It appears to be a problem to do this here. Maybe because - // the watcher is not running yet? + // the watcher is not running yet? Could submit a task that could + // await an appropriate condition to start.... // final QuorumActor<?, ?> actor = quorum.getActor(); // actor.memberAdd(); // actor.pipelineAdd(); @@ -765,89 +793,132 @@ public void quorumBreak() { super.quorumBreak(); - - // Inform the Journal that the quorum token is invalid. 
- journal.setQuorumToken(Quorum.NO_QUORUM); - if(HA_LOG_ENABLED) { + // Submit task to handle this event. + server.singleThreadExecutor.execute(new MonitoredFutureTask<Void>( + new QuorumBreakTask())); + + } + + /** + * Task to handle a quorum break event. + */ + private class QuorumBreakTask implements Callable<Void> { + + public Void call() throws Exception { - try { + // Inform the Journal that the quorum token is invalid. + journal.setQuorumToken(Quorum.NO_QUORUM); - journal.getHALogWriter().disable(); + if (HA_LOG_ENABLED) { - } catch (IOException e) { + try { - haLog.error(e, e); + journal.getHALogWriter().disable(); + } catch (IOException e) { + + haLog.error(e, e); + + } + } - - } -// if (server.isRunning()) { -// /* -// * Attempt to cast a vote for our lastCommitTime. -// * -// * FIXME BOUNCE : May need to trigger when we re-connect with -// * zookeeper if this event was triggered by a zk session -// * expiration. -// */ -// doConditionalCastVote(server, -// (Quorum<HAGlue, QuorumService<HAGlue>>) this -// .getQuorum(), -// journal); -// } + // if (server.isRunning()) { + // /* + // * Attempt to cast a vote for our lastCommitTime. + // * + // * FIXME BOUNCE : May need to trigger when we re-connect with + // * zookeeper if this event was triggered by a zk session + // * expiration. + // */ + // doConditionalCastVote(server, + // (Quorum<HAGlue, QuorumService<HAGlue>>) this + // .getQuorum(), + // journal); + // } - } + // Done. + return null; + } + } // class QuorumBreakTask + @Override public void quorumMeet(final long token, final UUID leaderId) { super.quorumMeet(token, leaderId); - // Inform the journal that there is a new quorum token. - journal.setQuorumToken(token); + // Submit task to handle this event. + server.singleThreadExecutor.execute(new MonitoredFutureTask<Void>( + new QuorumMeetTask(token, leaderId))); - if (HA_LOG_ENABLED) { + } - if (isJoinedMember(token)) { + /** + * Task to handle a quorum meet event. + */ + private class QuorumMeetTask implements Callable<Void> { - try { + private final long token; + private final UUID leaderId; - journal.getHALogWriter().createLog( - journal.getRootBlockView()); + public QuorumMeetTask(final long token, final UUID leaderId) { + this.token = token; + this.leaderId = leaderId; + } + + public Void call() throws Exception { + + // Inform the journal that there is a new quorum token. + journal.setQuorumToken(token); - } catch (IOException e) { + if (HA_LOG_ENABLED) { - /* - * We can not remain in the quorum if we can not write - * the HA Log file. - */ - haLog.error("CAN NOT OPEN LOG: " + e, e); + if (isJoinedMember(token)) { - getActor().serviceLeave(); + try { - } + journal.getHALogWriter().createLog( + journal.getRootBlockView()); - } else { + } catch (IOException e) { - /* - * The quorum met, but we are not in the met quorum. - * - * Note: We need to synchronize in order to join an already - * met quorum. We can not just vote our lastCommitTime. We - * need to go through the synchronization protocol in order - * to make sure that we actually have the same durable state - * as the met quorum. - */ + /* + * We can not remain in the quorum if we can not write + * the HA Log file. + */ + haLog.error("CAN NOT OPEN LOG: " + e, e); - conditionalStartResync(token); + getActor().serviceLeave(); + } + + } else { + + /* + * The quorum met, but we are not in the met quorum. + * + * Note: We need to synchronize in order to join an already + * met quorum. We can not just vote our lastCommitTime. 
We + * need to go through the synchronization protocol in order + * to make sure that we actually have the same durable state + * as the met quorum. + */ + + conditionalStartResync(token); + + } + } + + // Done. + return null; - } - - } - + } // call() + + } // class QuorumMeetTask + @Override public void pipelineAdd() { @@ -884,10 +955,9 @@ } - resyncFuture = new FutureTaskMon<Void>(new ResyncTask()); + resyncFuture = new MonitoredFutureTask<Void>(new ResyncTask()); journal.getExecutorService().submit(resyncFuture); - } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2012-12-03 14:55:16 UTC (rev 6754) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2012-12-03 16:48:37 UTC (rev 6755) @@ -325,16 +325,13 @@ /** * Wait until the KB exists. * - * @param haGlue - * The server. + * Note: There is a data race when creating the a KB (especially the default + * KB) and verifying that the KB exists. If we find the KB in the row store + * cache but we do not find the axioms, then the subsequent attempts to + * resolve the KB fail - probably due to an issue with the default resource + * locator cache. * - * FIXME There is a data race when creating the a KB (especially - * the default KB) and verifying that the KB exists. If we find - * the KB in the row store cache but we do not find the axioms, - * then the subsequent attempts to resolve the KB fail - probably - * due to an issue with the default resource locator cache. - * - * <pre> + * <pre> * INFO : 41211 2012-11-06 08:38:41,874 : WARN : 8542 2012-11-06 08:38:41,873 qtp877533177-45 org.eclipse.jetty.util.log.Slf4jLog.warn(Slf4jLog.java:50): /sparql * INFO : 41211 2012-11-06 08:38:41,874 : java.lang.RuntimeException: java.lang.RuntimeException: java.lang.RuntimeException: No axioms defined? : LocalTripleStore{timestamp=-1, namespace=kb, container=null, indexManager=com.bigdata.journal.jini.ha.HAJournal@4d092447} * INFO : 41211 2012-11-06 08:38:41,874 : at com.bigdata.rdf.sail.webapp.QueryServlet.doEstCard(QueryServlet.java:1120) @@ -373,6 +370,12 @@ * INFO : 41212 2012-11-06 08:38:41,875 : ... 23 more * </pre> * + * @param haGlue + * The server. + * + * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/617" > + * Concurrent KB create fails with "No axioms defined?" </a> + * * @see TestConcurrentKBCreate */ protected void awaitKBExists(final HAGlue haGlue) throws IOException { @@ -382,12 +385,10 @@ assertCondition(new Runnable() { public void run() { try { - shortSleep(); // FIXME REMOVE! repo.size(); } catch (Exception e) { // KB does not exist. fail(); - shortSleep(); // FIXME REMOVE! } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2012-12-12 17:53:30
Revision: 6764 http://bigdata.svn.sourceforge.net/bigdata/?rev=6764&view=rev Author: thompsonbry Date: 2012-12-12 17:53:16 +0000 (Wed, 12 Dec 2012) Log Message: ----------- => done: DumpJournal w/o -pages should give the table w/o the averages per node/leaf, etc. This required some GIST changes. In particular, I added dumpPages(recursive:boolean) to ICheckpointProtocol, factored out an ICheckpoint interface, made IndexSegmentCheckpoint an implementation of the ICheckpoint interface and made IndexSegment implement ICheckpointProtocol. Bug fix to DumpJournal where it was closing stdout. => DumpJournal w/ prefix (StatusServlet) Any &dumpNamespace=NAMESPACE URL query parameters will be used to dump only the specified namespaces. The namespace parameter here is a *prefix*. Any index having any of the specified prefixes will be dumped. When &dumpPages is also specified, the pages for the indices covered by the namespace prefix will be dumped. For example, this will only dump the statement indices for the "kb" triple store instance. http://localhost:8080/status?dumpJournal&dumpNamespace=kb.spo While this will dump the indices for two different KBs (if they exist). http://localhost:8080/status?dumpJournal&dumpNamespace=kb1&dumpNamespace=kb2 => done. index.html - added link for the VoID graph of the known KBs. => done. Sparql endpoint for tenants does provide their ServiceDescription. The trailing /sparql path component was not being recognized and the URL was being treated as a linked data query and turned into a DESCRIBE of that sparql endpoint rather than a SERVICE DESCRIPTION request for that end point. @see https://sourceforge.net/apps/trac/bigdata/ticket/585 (GIST) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/Checkpoint.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ISimpleTreeIndexAccess.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/IndexSegment.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/IndexSegmentCheckpoint.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/PageStats.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/HTree.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/htree/TestHTreeWithMemStore.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/BaseIndexStats.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpoint.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java 2012-12-10 14:43:49 UTC (rev 6763) +++ 
branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -40,6 +40,7 @@ import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstant; import com.bigdata.bop.IVariable; +import com.bigdata.btree.BaseIndexStats; import com.bigdata.btree.Checkpoint; import com.bigdata.btree.IndexMetadata; import com.bigdata.io.SerializerUtil; @@ -459,6 +460,11 @@ } + @Override + public BaseIndexStats dumpPages(final boolean recursive) { + return new BaseIndexStats(this); + } + /* * I've commented out the AccessPath and Predicate abstractions for now. * They were not required to implement the SPARQL CACHE mechanism, but Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -136,7 +136,8 @@ * @see KeyBuilder */ abstract public class AbstractBTree implements IIndex, IAutoboxBTree, - ILinearList, IBTreeStatistics, ILocalBTreeView, ISimpleTreeIndexAccess { + ILinearList, IBTreeStatistics, ILocalBTreeView, ISimpleTreeIndexAccess, + ICheckpointProtocol { /** * The index is already closed. @@ -1507,8 +1508,14 @@ } @Override - public PageStats dumpPages() { + public BaseIndexStats dumpPages(final boolean recursive) { + if(!recursive) { + + return new BaseIndexStats(this); + + } + final BTreePageStats stats = new BTreePageStats(); dumpPages(this, getRoot(), stats); Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/BaseIndexStats.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/BaseIndexStats.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/BaseIndexStats.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -0,0 +1,166 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.btree; + + +/** + * Basic stats that are available for all index types and whose collection does + * not require visitation of the index pages. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class BaseIndexStats { + + /** The type of index. */ + public IndexTypeEnum indexType; + /** + * The name associated with the index -or- <code>null</code> if the index is + * not named. + */ + public String name; + /** + * The current branching factor for the index. + * + * TODO GIST: [m] is BTree specific. The [addressBits] concept is the + * parallel for the HTree. 
This field should probably be moved into the + * concrete instances of the {@link PageStats} class. + */ + public int m; + /** The #of entries in the index. */ + public long ntuples; + /** The height (aka depth) of the index */ + public int height; + /** The #of nodes visited. */ + public long nnodes; + /** The #of leaves visited. */ + public long nleaves; + + /** + * Zero-arg constructor does NOT initialize the fields. + */ + public BaseIndexStats() { + + } + + /** + * Initializes the fields for the specified index. + */ + public BaseIndexStats(final ICheckpointProtocol ndx) { + + if (ndx == null) + throw new IllegalArgumentException(); + + final ICheckpoint checkpoint = ndx.getCheckpoint(); + + final IndexMetadata metadata = ndx.getIndexMetadata(); + + this.indexType = checkpoint.getIndexType(); + + this.name = metadata.getName(); + + switch (indexType) { + case BTree: + this.m = metadata.getBranchingFactor(); + break; + case HTree: + m = ((HTreeIndexMetadata) metadata).getAddressBits(); + break; + case Stream: + m = 0; // N/A + break; + default: + throw new AssertionError("Unknown indexType=" + indexType); + } + + /* + * Note: The "height" of the HTree must be computed dynamically since + * the HTree is not a balanced tree. It will be reported as ZERO (0) + * using this logic. + */ + this.height = checkpoint.getHeight(); + + this.ntuples = checkpoint.getEntryCount(); + + this.nnodes = checkpoint.getNodeCount(); + + this.nleaves = checkpoint.getLeafCount(); + + } + + /** + * Return the header row for a table. + * + * @return The header row. + */ + public String getHeaderRow() { + + final StringBuilder sb = new StringBuilder(); + + sb.append("name"); + sb.append('\t'); + sb.append("indexType"); + sb.append('\t'); + sb.append("m"); + sb.append('\t'); + sb.append("height"); + sb.append('\t'); + sb.append("nnodes"); + sb.append('\t'); + sb.append("nleaves"); + sb.append('\t'); + sb.append("nentries"); + + return sb.toString(); + } + + /** + * Return a row of data for an index as aggregated by this {@link PageStats} + * object. + * + * @see #getHeaderRow() + */ + public String getDataRow() { + + final BaseIndexStats stats = this; + + final StringBuilder sb = new StringBuilder(); + + sb.append(name); + sb.append('\t'); + sb.append(indexType); + sb.append('\t'); + sb.append(stats.m); + sb.append('\t'); + sb.append(stats.height); + sb.append('\t'); + sb.append(stats.nnodes); + sb.append('\t'); + sb.append(stats.nleaves); + sb.append('\t'); + sb.append(stats.ntuples); + + return sb.toString(); + } + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/Checkpoint.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/Checkpoint.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/Checkpoint.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -51,7 +51,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -public class Checkpoint implements Externalizable { +public class Checkpoint implements ICheckpoint, Externalizable { /** * @@ -80,16 +80,6 @@ */ private IndexTypeEnum indexType; - /** - * The address used to read this {@link Checkpoint} record from the - * store. - * <p> - * Note: This is set as a side-effect by {@link #write(IRawStore)}. - * - * @throws IllegalStateException - * if the {@link Checkpoint} record has not been written on - * a store. 
- */ final public long getCheckpointAddr() { if (addrCheckpoint == 0L) { @@ -102,57 +92,30 @@ } - /** - * Return <code>true</code> iff the checkpoint address is defined. - */ final public boolean hasCheckpointAddr() { - return addrCheckpoint != 0L; + return addrCheckpoint != 0L; } - /** - * Address that can be used to read the {@link IndexMetadata} record for - * the index from the store. - */ final public long getMetadataAddr() { return addrMetadata; } - /** - * Address of the root node or leaf of the {@link BTree}. - * - * @return The address of the root -or- <code>0L</code> iff the btree - * does not have a root. - */ final public long getRootAddr() { return addrRoot; } - /** - * Address of the {@link IBloomFilter}. - * - * @return The address of the bloom filter -or- <code>0L</code> iff the - * btree does not have a bloom filter. - */ final public long getBloomFilterAddr() { return addrBloomFilter; } - /** - * The height of a B+Tree. ZERO(0) means just a root leaf. Values greater - * than zero give the #of levels of abstract nodes. There is always one - * layer of leaves which is not included in this value. - * - * @return The global depth and ZERO (0) unless the checkpoint record is for - * an {@link IndexTypeEnum#BTree} - */ public final int getHeight() { switch (indexType) { @@ -164,12 +127,6 @@ } - /** - * The global depth of the root directory (HTree only). - * - * @return The global depth and ZERO (0) unless the checkpoint record is for - * an {@link IndexTypeEnum#HTree} - */ public final int getGlobalDepth() { switch (indexType) { @@ -181,59 +138,37 @@ } - /** - * The #of non-leaf nodes (B+Tree) or directories (HTree). - */ public final long getNodeCount() { return nnodes; } - /** - * The #of leaves (B+Tree) or hash buckets (HTree). - */ public final long getLeafCount() { return nleaves; } - /** - * The #of index entries (aka tuple count). - */ public final long getEntryCount() { return nentries; } - /** - * Return the value of the B+Tree local counter stored in the - * {@link Checkpoint} record. - */ public final long getCounter() { return counter; } - /** - * Return the value of the next record version number to be assigned that is - * stored in the {@link Checkpoint} record. This number is incremented each - * time a node or leaf is written onto the backing store. The initial value - * is ZERO (0). The first value assigned to a node or leaf will be ZERO (0). - */ public final long getRecordVersion() { return counter; } - /** - * The type of index for this checkpoint record. - */ - public IndexTypeEnum getIndexType() { + public final IndexTypeEnum getIndexType() { return indexType; Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpoint.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpoint.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpoint.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -0,0 +1,128 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Nov 15, 2006 + */ +package com.bigdata.btree; + +import com.bigdata.rawstore.IRawStore; + +/** + * Metadata for an index checkpoint record. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public interface ICheckpoint { + + /** + * The address used to read this {@link Checkpoint} record from the store. + * <p> + * Note: This is set as a side-effect by {@link #write(IRawStore)}. + * + * @throws IllegalStateException + * if the {@link Checkpoint} record has not been written on a + * store. + */ + long getCheckpointAddr(); + + /** + * Return <code>true</code> iff the checkpoint address is defined. + */ + boolean hasCheckpointAddr(); + + /** + * Address that can be used to read the {@link IndexMetadata} record for the + * index from the store. + */ + long getMetadataAddr(); + + /** + * Address of the root node or leaf of the {@link BTree}. + * + * @return The address of the root -or- <code>0L</code> iff the index does + * not have a root page. + */ + long getRootAddr(); + + /** + * Address of the {@link IBloomFilter}. + * + * @return The address of the bloom filter -or- <code>0L</code> iff the + * index does not have a bloom filter. + */ + long getBloomFilterAddr(); + + /** + * The height of a B+Tree. ZERO(0) means just a root leaf. Values greater + * than zero give the #of levels of abstract nodes. There is always one + * layer of leaves which is not included in this value. + * + * @return The global depth and ZERO (0) unless the checkpoint record is for + * an {@link IndexTypeEnum#BTree} + */ + int getHeight(); + + /** + * The global depth of the root directory (HTree only). + * + * @return The global depth and ZERO (0) unless the checkpoint record is for + * an {@link IndexTypeEnum#HTree} + */ + int getGlobalDepth(); + + /** + * The #of non-leaf nodes (B+Tree) or directories (HTree). + */ + long getNodeCount(); + + /** + * The #of leaves (B+Tree) or hash buckets (HTree). + */ + long getLeafCount(); + + /** + * The #of index entries (aka tuple count). + */ + long getEntryCount(); + + /** + * Return the value of the B+Tree local counter stored in the + * {@link Checkpoint} record. + */ + long getCounter(); + + /** + * Return the value of the next record version number to be assigned that is + * stored in the {@link Checkpoint} record. This number is incremented each + * time a node or leaf is written onto the backing store. The initial value + * is ZERO (0). The first value assigned to a node or leaf will be ZERO (0). + */ + long getRecordVersion(); + + /** + * The type of index for this checkpoint record. 
+ */ + IndexTypeEnum getIndexType(); + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -56,12 +56,12 @@ public long getRecordVersion(); /** - * Returns the most recent {@link Checkpoint} record. + * Returns the most recent {@link ICheckpoint} record. * - * @return The most recent {@link Checkpoint} record and never + * @return The most recent {@link ICheckpoint} record and never * <code>null</code>. */ - public Checkpoint getCheckpoint(); + public ICheckpoint getCheckpoint(); /** * The address at which the most recent {@link IndexMetadata} record was @@ -241,5 +241,17 @@ * @see #reopen() */ public boolean isOpen(); + + /** + * Reports statistics for the index. + * + * @param recursive + * When <code>true</code>, also collects statistics on the pages + * (nodes and leaves) using a low-level approach. + * + * @return Some interesting statistics about the index (and optionally the + * pages in that index) which the caller can print out. + */ + BaseIndexStats dumpPages(boolean recursive); } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ISimpleTreeIndexAccess.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ISimpleTreeIndexAccess.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ISimpleTreeIndexAccess.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -78,15 +78,4 @@ */ int getHeight(); - /** - * Visits pages (nodes and leaves) using a low-level approach. - * - * @param ndx - * The index. - * - * @return Some interesting statistics about the pages in that index which - * the caller can print out. - */ - PageStats dumpPages(); - } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/IndexSegment.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/IndexSegment.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/IndexSegment.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -37,7 +37,6 @@ import com.bigdata.service.Event; import com.bigdata.service.EventResource; import com.bigdata.service.EventType; -import com.bigdata.striterator.ICloseableIterator; /** * An index segment is read-only btree corresponding to some key range of a @@ -122,6 +121,7 @@ // // } + @Override final public int getHeight() { // Note: fileStore.checkpoint is now final. reopen() is not required. // reopen(); @@ -130,6 +130,7 @@ } + @Override final public long getNodeCount() { // Note: fileStore.checkpoint is now final. reopen() is not required. // reopen(); @@ -138,6 +139,7 @@ } + @Override final public long getLeafCount() { // Note: fileStore.checkpoint is now final. reopen() is not required. // reopen(); @@ -146,6 +148,7 @@ } + @Override final public long getEntryCount() { // Note: fileStore.checkpoint is now final. reopen() is not required. 
// reopen(); @@ -154,6 +157,58 @@ } + @Override + final public ICheckpoint getCheckpoint() { + + return fileStore.getCheckpoint(); + + } + + @Override + public long getRecordVersion() { + return getCheckpoint().getRecordVersion(); + } + + @Override + public long getMetadataAddr() { + return getCheckpoint().getMetadataAddr(); + } + + @Override + public long getRootAddr() { + return getCheckpoint().getRootAddr(); + } + + @Override + public void setLastCommitTime(long lastCommitTime) { + throw new UnsupportedOperationException(ERROR_READ_ONLY); + } + + @Override + public long writeCheckpoint() { + throw new UnsupportedOperationException(ERROR_READ_ONLY); + } + + @Override + public Checkpoint writeCheckpoint2() { + throw new UnsupportedOperationException(ERROR_READ_ONLY); + } + + @Override + public IDirtyListener getDirtyListener() { + throw new UnsupportedOperationException(ERROR_READ_ONLY); + } + + @Override + public void setDirtyListener(IDirtyListener listener) { + throw new UnsupportedOperationException(ERROR_READ_ONLY); + } + + @Override + public long handleCommit(long commitTime) { + throw new UnsupportedOperationException(ERROR_READ_ONLY); + } + /** * {@inheritDoc} * <p> Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/IndexSegmentCheckpoint.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/IndexSegmentCheckpoint.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/IndexSegmentCheckpoint.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -57,7 +57,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -public class IndexSegmentCheckpoint { +public class IndexSegmentCheckpoint implements ICheckpoint { /** * Logger. @@ -1033,4 +1033,94 @@ return sb.toString(); } + /** + * {@inheritDoc} + * <p> + * Note: The checkpoint is assembled from the root block by the constructor. + * There is no address from which it can be re-read. + * + * @return <code>0L</code> + */ + @Override + public long getCheckpointAddr() { + return 0L; + } + + /** + * {@inheritDoc} + * <p> + * Note: The checkpoint is assembled from the root block by the constructor. + * There is no address from which it can be re-read. + * + * @return <code>false</code> + */ + @Override + public boolean hasCheckpointAddr() { + return false; + } + + @Override + public long getMetadataAddr() { + return addrMetadata; + } + + @Override + public long getRootAddr() { + return addrRoot; + } + + @Override + public long getBloomFilterAddr() { + return addrBloom; + } + + @Override + public int getHeight() { + return height; + } + + @Override + public int getGlobalDepth() { + return 0; // ZERO since not HTree. + } + + @Override + public long getNodeCount() { + return nnodes; + } + + @Override + public long getLeafCount() { + return nleaves; + } + + @Override + public long getEntryCount() { + return nentries; + } + + /** + * {@inheritDoc} + * <p> + * Note: There is no counter associated with an {@link IndexSegment}. The + * counter is only available for the {@link BTree}. 
+ * + * @return <code>0L</code> + */ + @Override + public long getCounter() { + return 0; + } + + @Override + public long getRecordVersion() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public IndexTypeEnum getIndexType() { + return IndexTypeEnum.BTree; + } + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/PageStats.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/PageStats.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/PageStats.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -29,33 +29,10 @@ /** * Class reports various summary statistics for nodes and leaves. */ -abstract public class PageStats { +abstract public class PageStats extends BaseIndexStats { /** Number of nodes/leaves visited so far. */ public long nvisited; - /** The type of index. */ - public IndexTypeEnum indexType; - /** - * The name associated with the index -or- <code>null</code> if the index is - * not named. - */ - public String name; - /** - * The current branching factor for the index. - * - * TODO GIST: [m] is BTree specific. The [addressBits] concept is the - * parallel for the HTree. This field should probably be moved into the - * concrete instances of the {@link PageStats} class. - */ - public int m; - /** The #of entries in the index. */ - public long ntuples; - /** The height (aka depth) of the index */ - public int height; - /** The #of nodes visited. */ - public long nnodes; - /** The #of leaves visited. */ - public long nleaves; /** The #of bytes in the raw records for the nodes visited. */ public long nodeBytes; /** The #of bytes in the raw records for the leaves visited. */ @@ -178,7 +155,8 @@ * * @return The header row. 
*/ - public static String getHeaderRow() { + @Override + public String getHeaderRow() { final StringBuilder sb = new StringBuilder(); @@ -242,6 +220,7 @@ * * @see #getHeaderRow() */ + @Override public String getDataRow() { final PageStats stats = this; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/HTree.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/HTree.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/HTree.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -42,6 +42,7 @@ import com.bigdata.btree.AbstractNode; import com.bigdata.btree.BTree; import com.bigdata.btree.BTreeCounters; +import com.bigdata.btree.BaseIndexStats; import com.bigdata.btree.BytesUtil; import com.bigdata.btree.Checkpoint; import com.bigdata.btree.HTreeIndexMetadata; @@ -54,7 +55,6 @@ import com.bigdata.btree.IndexTypeEnum; import com.bigdata.btree.Leaf; import com.bigdata.btree.Node; -import com.bigdata.btree.PageStats; import com.bigdata.btree.ReadOnlyCounter; import com.bigdata.btree.UnisolatedReadWriteIndex; import com.bigdata.btree.keys.IKeyBuilder; @@ -1698,8 +1698,14 @@ } @Override - public PageStats dumpPages() { + public BaseIndexStats dumpPages(final boolean recursive) { + if (!recursive) { + + return new BaseIndexStats(this); + + } + final HTreePageStats stats = new HTreePageStats(); getRoot().dumpPages(stats); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -44,13 +44,12 @@ import org.apache.log4j.Logger; import com.bigdata.btree.AbstractBTree; +import com.bigdata.btree.BaseIndexStats; import com.bigdata.btree.BytesUtil; import com.bigdata.btree.DumpIndex; import com.bigdata.btree.ICheckpointProtocol; -import com.bigdata.btree.ISimpleTreeIndexAccess; import com.bigdata.btree.ITupleIterator; import com.bigdata.btree.IndexTypeEnum; -import com.bigdata.btree.PageStats; import com.bigdata.htree.AbstractHTree; import com.bigdata.io.SerializerUtil; import com.bigdata.rawstore.Bytes; @@ -342,7 +341,8 @@ } finally { - w.close(); + // Note: DO NOT close stdout! +// w.close(); } } @@ -649,9 +649,8 @@ final Iterator<String> nitr = journal.indexNameScan(null/* prefix */, commitRecord.getTimestamp()); - final Map<String, PageStats> pageStats = dumpPages ? new TreeMap<String, PageStats>() - : null; - + final Map<String, BaseIndexStats> pageStats = new TreeMap<String, BaseIndexStats>(); + while (nitr.hasNext()) { // a registered index. @@ -726,19 +725,14 @@ * then we could generate (parts of) this table very quickly. As it * stands, we have to actually scan the pages in the index. */ - if (ndx instanceof ISimpleTreeIndexAccess) { + { - if (pageStats != null) { + final BaseIndexStats stats = ndx.dumpPages(dumpPages); - final PageStats stats = ((ISimpleTreeIndexAccess) ndx) - .dumpPages(); + out.println("\t" + stats); - out.println("\t" + stats); + pageStats.put(name, stats); - pageStats.put(name, stats); - - } - if (dumpIndices) { if (ndx instanceof AbstractBTree) { @@ -757,18 +751,18 @@ } // while(itr) (next index) - if (pageStats != null) { + { /* * Write out the header. 
*/ - out.println(PageStats.getHeaderRow()); + boolean first = true; - for (Map.Entry<String, PageStats> e : pageStats.entrySet()) { + for (Map.Entry<String, BaseIndexStats> e : pageStats.entrySet()) { final String name = e.getKey(); - final PageStats stats = e.getValue(); + final BaseIndexStats stats = e.getValue(); if (stats == null) { @@ -787,6 +781,14 @@ } + if (first) { + + out.println(stats.getHeaderRow()); + + first = false; + + } + /* * Write out the stats for this index. */ Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/htree/TestHTreeWithMemStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/htree/TestHTreeWithMemStore.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/htree/TestHTreeWithMemStore.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -34,13 +34,13 @@ import org.apache.log4j.Logger; import com.bigdata.btree.BTreeCounters; +import com.bigdata.btree.BaseIndexStats; import com.bigdata.btree.BytesUtil; import com.bigdata.btree.DefaultTupleSerializer; import com.bigdata.btree.HTreeIndexMetadata; import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleIterator; import com.bigdata.btree.ITupleSerializer; -import com.bigdata.btree.PageStats; import com.bigdata.btree.keys.ASCIIKeyBuilderFactory; import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.keys.KeyBuilder; @@ -472,7 +472,7 @@ htree.writeCheckpoint(); // Verify that we can compute the page stats. - final PageStats stats = htree.dumpPages(); + final BaseIndexStats stats = htree.dumpPages(true/*recursive*/); if (log.isInfoEnabled()) log.info(stats.toString()); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -265,6 +265,11 @@ .dumpJournal(true/* dumpHistory */, true/* dumpPages */, true/* dumpIndices */, false/* showTuples */); + // test again w/o dumpPages + new DumpJournal(src) + .dumpJournal(true/* dumpHistory */, false/* dumpPages */, + true/* dumpIndices */, false/* showTuples */); + } finally { src.destroy(); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -128,11 +128,16 @@ /* * Look for linked data GET requests. + * + * Note: URIs ending in /sparql are *assumed* to be SPARQL end points on + * this server. A GET against a SPARQL end point is a SERVICE + * DESCRIPTION request (not a DESCRIBE) and will be handled by the + * QueryServlet. 
*/ final String pathInfo = req.getPathInfo(); - if (pathInfo != null) { + if (pathInfo != null && !pathInfo.endsWith("/sparql")) { final URI uri = new URIImpl(req.getRequestURL().toString()); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2012-12-12 17:53:16 UTC (rev 6764) @@ -32,6 +32,7 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -107,6 +108,15 @@ private static final String DUMP_PAGES = "dumpPages"; /** + * Restrict a low-level dump of the journal to only the indices having the + * specified namespace prefix. The {@link #DUMP_JOURNAL} option MUST also be + * specified. + * + * @see DumpJournal + */ + private static final String DUMP_NAMESPACE = "dumpNamespace"; + + /** * The name of a request parameter used to request a display of the * currently running queries. Legal values for this request parameter are * either {@value #DETAILS} or no value. @@ -332,11 +342,35 @@ true/* autoFlush */); out.print("<pre>\n"); - - final DumpJournal dump = new DumpJournal((Journal) getIndexManager()); - final List<String> namespaces = Collections.emptyList(); - + final DumpJournal dump = new DumpJournal( + (Journal) getIndexManager()); + + final List<String> namespaces; + + // Add in any specified namespace(s) (defaults to all). + { + + final String[] a = req.getParameterValues(DUMP_NAMESPACE); + + if (a == null) { + + namespaces = Collections.emptyList(); + + } else { + + namespaces = new LinkedList<String>(); + + for (String namespace : a) { + + namespaces.add(namespace); + + } + + } + + } + final boolean dumpHistory = false; final boolean dumpPages = req.getParameter(DUMP_PAGES) != null; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html 2012-12-10 14:43:49 UTC (rev 6763) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html 2012-12-12 17:53:16 UTC (rev 6764) @@ -22,6 +22,8 @@ <dd>This page.</dd> <dt>http://hostname:port/bigdata/sparql</dt> <dd>The SPARQL REST API (<a href="sparql">Service Description</a>).</dd> +<dt>http://hostname:port/bigdata/namespace</dt> +<dd>VoID <a href="namespace">graph of available KBs</a> from this service.</dd> <dt>http://hostname:port/bigdata/status</dt> <dd>A <a href="status">status</a> page.</dd> <dt>http://hostname:port/bigdata/counters</dt> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
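The r6764 commit above consolidates index statistics behind the new ICheckpoint and BaseIndexStats abstractions: ICheckpointProtocol.dumpPages(boolean recursive) now returns a BaseIndexStats (a fast, checkpoint-only summary when recursive is false, a full page scan otherwise), and DumpJournal defers printing the header row until it has the first stats object. A minimal sketch along those lines (not part of the commit; how the ICheckpointProtocol instances are resolved from a Journal commit record is elided):

    import com.bigdata.btree.BaseIndexStats;
    import com.bigdata.btree.ICheckpointProtocol;

    /**
     * Sketch: print the tab-delimited statistics table for some indices using
     * the dumpPages(boolean) method introduced in r6764.
     */
    public class DumpPagesExample {

        public static void printStats(
                final Iterable<? extends ICheckpointProtocol> indices,
                final boolean recursive) {

            boolean first = true;

            for (ICheckpointProtocol ndx : indices) {

                // recursive=false reads only the Checkpoint record; recursive=true
                // also visits the pages (nodes and leaves) of the index.
                final BaseIndexStats stats = ndx.dumpPages(recursive);

                if (first) {
                    // Defer the header until the first stats object so the columns
                    // match the concrete class (PageStats adds more columns).
                    System.out.println(stats.getHeaderRow());
                    first = false;
                }

                System.out.println(stats.getDataRow());
            }
        }
    }

The same table is exposed through the NSS status page via the DUMP_PAGES ("dumpPages") and new DUMP_NAMESPACE ("dumpNamespace") request parameters; per the StatusServlet javadoc, DUMP_NAMESPACE only applies when DUMP_JOURNAL is also specified.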
From: <tho...@us...> - 2012-12-12 19:15:33
Revision: 6765 http://bigdata.svn.sourceforge.net/bigdata/?rev=6765&view=rev Author: thompsonbry Date: 2012-12-12 19:15:23 +0000 (Wed, 12 Dec 2012) Log Message: ----------- Added support for issueing queries and update requests against non-default KBs from the NSS index.html page. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2012-12-12 17:53:16 UTC (rev 6764) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2012-12-12 19:15:23 UTC (rev 6765) @@ -188,6 +188,15 @@ */ protected static final String USING_NAMED_GRAPH_URI = "using-named-graph-uri"; + /** + * URL query parameter used to specify a non-default KB namespace (as an + * alternative to having it in the URL path). The path takes precendence + * over this query parameter. + * + * @see BigdataRDFServlet#getNamespace(HttpServletRequest) + */ + protected static final String NAMESPACE = "namespace"; + private final SparqlEndpointConfig m_config; /** Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2012-12-12 17:53:16 UTC (rev 6764) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2012-12-12 19:15:23 UTC (rev 6765) @@ -267,6 +267,21 @@ if (snmsp == -1) { + String s = req.getParameter(BigdataRDFContext.NAMESPACE); + + if (s != null) { + + s = s.trim(); + + if (s.length() > 0) { + + // Specified as a query parameter. + return s; + + } + + } + // use the default namespace. return getConfig().namespace; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html 2012-12-12 17:53:16 UTC (rev 6764) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-war/src/html/index.html 2012-12-12 19:15:23 UTC (rev 6765) @@ -40,11 +40,15 @@ use GET for database queries since they are, by and large, idempotent. --> <h2><a href="http://www.w3.org/TR/sparql11-query/"> SPARQL Query </a></h2> -<FORM action="sparql" method="post"> +<FORM action="sparql" method="post" name="QUERY"> <P> <TEXTAREA name="query" rows="10" cols="80" title="Enter SPARQL Query." >SELECT * { ?s ?p ?o } LIMIT 1</TEXTAREA> </P><P> + Tenant Namespace + <INPUT type="text" name="namespace" title="Tenant namespace." + > (leave empty for default KB) + </P><P> <INPUT type="submit" value="Send" title="Submit query."> <INPUT type="checkbox" name="explain" value="true" title="Explain query plan rather than returning the query results." @@ -70,6 +74,10 @@ dc:creator "A.N.Other" . }</TEXTAREA> </P><P> + Tenant Namespace + <INPUT type="text" name="namespace" title="Tenant namespace." 
+ > (leave empty for default KB) + </P><P> <INPUT type="submit" value="Send" title="Submit Update."> <!--INPUT type="checkbox" name="explain" value="true" title="Explain query plan rather than returning the query results." This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
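The r6765 commit above adds a "namespace" request parameter as an alternative to encoding the target KB in the URL path; per BigdataRDFServlet.getNamespace(), a namespace in the path still takes precedence when both are present. A minimal sketch of driving the SPARQL endpoint against a non-default KB this way (not part of the commit; the host, port and the "myKB" namespace are placeholders):

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.net.URLEncoder;

    /**
     * Sketch: POST a SPARQL query to the default endpoint while selecting a
     * non-default KB via the "namespace" parameter added in r6765.
     */
    public class NamespaceQueryExample {

        public static void main(final String[] args) throws Exception {

            final String form = "query="
                    + URLEncoder.encode("SELECT * { ?s ?p ?o } LIMIT 1", "UTF-8")
                    + "&namespace=" + URLEncoder.encode("myKB", "UTF-8");

            final HttpURLConnection conn = (HttpURLConnection) new URL(
                    "http://localhost:8080/bigdata/sparql").openConnection();

            conn.setDoOutput(true);
            conn.setRequestMethod("POST");
            conn.setRequestProperty("Content-Type",
                    "application/x-www-form-urlencoded");

            final OutputStream os = conn.getOutputStream();
            try {
                os.write(form.getBytes("UTF-8"));
            } finally {
                os.close();
            }

            // 200 is expected when the named KB exists.
            System.out.println("HTTP " + conn.getResponseCode());
        }
    }

This mirrors what the updated index.html form does: the "Tenant Namespace" text input simply submits an extra namespace field along with the query or update.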
From: <tho...@us...> - 2012-12-19 18:43:42
Revision: 6786 http://bigdata.svn.sourceforge.net/bigdata/?rev=6786&view=rev Author: thompsonbry Date: 2012-12-19 18:43:27 +0000 (Wed, 19 Dec 2012) Log Message: ----------- Merging in change set for ticket to update dependencies to Sesame 2.6.10. There are some new test failures. Most of these are negative syntax tests for the SPARQL parser. There are a few new test failures for new tests. @see https://sourceforge.net/apps/trac/bigdata/ticket/593 (Upgrade to Sesame 2.6.10) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/.classpath branches/BIGDATA_RELEASE_1_2_0/bigdata-gom/src/test/com/bigdata/gom/TestNumericBNodes.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/nquads/NQuadsParser.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/nquads/NQuadsParserFactory.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/ntriples/BigdataNTriplesParser.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/ntriples/BigdataNTriplesParserFactory.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/Att.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/Atts.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/BigdataRDFXMLParser.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/BigdataRDFXMLParserFactory.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/BigdataRDFXMLWriter.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/BigdataRDFXMLWriterFactory.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/SAXFilter.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleParser.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleParserFactory.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionRegistry.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/HavingNode.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysis.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BD.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/rio/nquads/TestNQuadsParser.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/rio/nquads/TestNQuadsParserFactory.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ASTVisitorBase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BaseDeclProcessor.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BatchRDFValueResolver.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/Bigdata2ASTSPARQLParser.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BigdataASTContext.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BigdataASTVisitorBase.java 
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BigdataExprBuilder.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BigdataParsedQuery.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BigdataParsedUpdate.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BlankNodeVarProcessor.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/DatasetDeclProcessor.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/GroupGraphPattern.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/GroupGraphPatternBuilder.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/NegatedPropertySet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/PrefixDeclProcessor.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/PropertySetElem.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/SPARQLUtil.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/StringEscapesProcessor.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/TriplePatternExprBuilder.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/UpdateExprBuilder.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ValueExprBuilder.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/ASTOperationContainer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/ASTQueryContainer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/ASTSelect.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/ASTSelectQuery.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/ASTUpdateContainer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/ASTUpdateSequence.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/SyntaxTreeBuilder.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/SyntaxTreeBuilderConstants.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/SyntaxTreeBuilderTokenManager.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/sparql.jj branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/sparql.jjt branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/Bigdata2ASTSPARQL11SyntaxTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestAll.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestAll_AST.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataComplexSparqlQueryTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestBigdataSailRemoteRepository.java 
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/org/openrdf/query/parser/sparql/ComplexSPARQLQueryTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/org/openrdf/query/parser/sparql/EarlReport.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/org/openrdf/query/parser/sparql/SPARQLASTQueryTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/org/openrdf/query/parser/sparql/SPARQLQueryTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/org/openrdf/query/parser/sparql/SPARQLUpdateTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/org/openrdf/sail/RDFStoreTest.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/lib/sesame-rio-testsuite-2.6.10.jar branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/lib/sesame-sparql-testsuite-2.6.10.jar branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/lib/sesame-store-testsuite-2.6.10.jar branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/BigdataSPARQL2ASTParserTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataArbitraryLengthPathTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateConformanceTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/org/openrdf/query/parser/sparql/SPARQL11SyntaxTest.java Removed Paths: ------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/lib/openrdf-sesame-2.6.3-onejar.jar branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/lib/sesame-rio-testsuite-2.6.3.jar branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/lib/sesame-sparql-testsuite-2.6.3.jar branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/lib/sesame-store-testsuite-2.6.3.jar Property Changed: ---------------- branches/BIGDATA_RELEASE_1_2_0/ branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty/ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate/ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util/ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba/ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166/ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/util/ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/jsr166/ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/util/httpd/ branches/BIGDATA_RELEASE_1_2_0/bigdata-compatibility/ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/attr/ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/disco/ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/util/config/ branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/ branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/btc/ branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/btc/src/resources/ branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/lubm/ branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/uniprot/ branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/uniprot/src/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/relation/ 
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/samples/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/BIGDATA_RELEASE_1_2_0/dsi-utils/ branches/BIGDATA_RELEASE_1_2_0/dsi-utils/LEGAL/ branches/BIGDATA_RELEASE_1_2_0/dsi-utils/lib/ branches/BIGDATA_RELEASE_1_2_0/dsi-utils/src/ branches/BIGDATA_RELEASE_1_2_0/dsi-utils/src/java/ branches/BIGDATA_RELEASE_1_2_0/dsi-utils/src/java/it/ branches/BIGDATA_RELEASE_1_2_0/dsi-utils/src/java/it/unimi/ branches/BIGDATA_RELEASE_1_2_0/dsi-utils/src/test/ branches/BIGDATA_RELEASE_1_2_0/dsi-utils/src/test/it/unimi/ branches/BIGDATA_RELEASE_1_2_0/dsi-utils/src/test/it/unimi/dsi/ branches/BIGDATA_RELEASE_1_2_0/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/BIGDATA_RELEASE_1_2_0/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/BIGDATA_RELEASE_1_2_0/osgi/ branches/BIGDATA_RELEASE_1_2_0/src/resources/bin/config/ Property changes on: branches/BIGDATA_RELEASE_1_2_0 ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 Modified: branches/BIGDATA_RELEASE_1_2_0/.classpath =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/.classpath 2012-12-19 18:19:21 UTC (rev 6785) +++ branches/BIGDATA_RELEASE_1_2_0/.classpath 2012-12-19 18:43:27 UTC (rev 6786) @@ -66,15 +66,11 @@ <classpathentry exported="true" kind="lib" path="bigdata/lib/unimi/fastutil-5.1.5.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-analyzers-3.0.0.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-core-3.0.0.jar"/> - 
<classpathentry kind="lib" path="bigdata-rdf/lib/sesame-rio-testsuite-2.6.3.jar" sourcepath="/org.openrdf.sesame-2.6.3"/> <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/high-scale-lib-v1.1.2.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/junit-ext-1.1-b3-dev.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/slf4j-api-1.6.1.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/slf4j-log4j12-1.6.1.jar"/> - <classpathentry kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.3-onejar.jar" sourcepath="/Users/bryan/Documents/workspace/org.openrdf.sesame-2.6.3"/> - <classpathentry kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.6.3.jar" sourcepath="/org.openrdf.sesame-2.6.3"/> - <classpathentry kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.6.3.jar"/> <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/commons-codec-1.4.jar"/> <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/commons-logging-1.1.1.jar"/> <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/httpclient-4.1.3.jar"/> @@ -84,5 +80,9 @@ <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar"/> <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/commons-io-2.1.jar"/> <classpathentry kind="lib" path="bigdata/lib/apache/log4j-1.2.17.jar"/> + <classpathentry kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar"/> + <classpathentry kind="lib" path="bigdata-rdf/lib/sesame-rio-testsuite-2.6.10.jar"/> + <classpathentry kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.6.10.jar"/> + <classpathentry kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.6.10.jar"/> <classpathentry kind="output" path="bin"/> </classpath> Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 + 
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836 + 
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/jsr166:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/jsr166:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/bop/joinGraph:6769-6785 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/bop/util:4814-4836 + 
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/bop/util:6769-6785 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/bop/util:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/jsr166:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/jsr166:6769-6785 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/jsr166:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/util/httpd ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/util/httpd:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/util/httpd:6769-6785 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/test/com/bigdata/util/httpd:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-compatibility ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-compatibility:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-compatibility:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-compatibility:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-compatibility:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-compatibility:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-compatibility:6769-6785 /branches/INT64_BRANCH/bigdata-compatibility:4486-4522 
/branches/LARGE_LITERALS_REFACTOR/bigdata-compatibility:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-compatibility:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-compatibility:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-compatibility:4814-4836 Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-gom/src/test/com/bigdata/gom/TestNumericBNodes.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-gom/src/test/com/bigdata/gom/TestNumericBNodes.java 2012-12-19 18:19:21 UTC (rev 6785) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-gom/src/test/com/bigdata/gom/TestNumericBNodes.java 2012-12-19 18:43:27 UTC (rev 6786) @@ -77,7 +77,7 @@ final String resource = "foaf-tbl-plus-6-degrees-small.nq"; - load(getClass().getResource(resource), NQuadsParser.nquads); + load(getClass().getResource(resource), RDFFormat.NQUADS); new Example1(om).call(); Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/attr ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/attr:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-jini/src/java/com/bigdata/attr:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-jini/src/java/com/bigdata/attr:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4814-4836 /branches/bugfix-btm/bigdata-jini/src/java/com/bigdata/attr:2594-3237 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/attr:2574-2730 /branches/fko/bigdata-jini/src/java/com/bigdata/attr:3150-3194 /trunk/bigdata-jini/src/java/com/bigdata/attr:2981-3282,3368-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/attr:6769-6785 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/attr:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-jini/src/java/com/bigdata/attr:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-jini/src/java/com/bigdata/attr:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-jini/src/java/com/bigdata/attr:4814-4836 /branches/bugfix-btm/bigdata-jini/src/java/com/bigdata/attr:2594-3237 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/attr:2574-2730 /branches/fko/bigdata-jini/src/java/com/bigdata/attr:3150-3194 
/trunk/bigdata-jini/src/java/com/bigdata/attr:2981-3282,3368-3437,3656-4061 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/disco ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/disco:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-jini/src/java/com/bigdata/disco:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-jini/src/java/com/bigdata/disco:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4814-4836 /branches/bugfix-btm/bigdata-jini/src/java/com/bigdata/disco:2594-3237 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/disco:2574-2730 /branches/fko/bigdata-jini/src/java/com/bigdata/disco:3150-3194 /trunk/bigdata-jini/src/java/com/bigdata/disco:2981-3282,3368-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/disco:6769-6785 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/disco:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-jini/src/java/com/bigdata/disco:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-jini/src/java/com/bigdata/disco:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-jini/src/java/com/bigdata/disco:4814-4836 /branches/bugfix-btm/bigdata-jini/src/java/com/bigdata/disco:2594-3237 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/disco:2574-2730 /branches/fko/bigdata-jini/src/java/com/bigdata/disco:3150-3194 /trunk/bigdata-jini/src/java/com/bigdata/disco:2981-3282,3368-3437,3656-4061 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/util/config ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/util/config:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-jini/src/java/com/bigdata/util/config:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:2633-3304 
/branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-jini/src/java/com/bigdata/util/config:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:4814-4836 /branches/bugfix-btm/bigdata-jini/src/java/com/bigdata/util/config:2594-3237 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/util/config:2574-2730 /branches/fko/bigdata-jini/src/java/com/bigdata/util/config:3150-3194 /trunk/bigdata-jini/src/java/com/bigdata/util/config:2981-3282,3368-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/util/config:6769-6785 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-jini/src/java/com/bigdata/util/config:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-jini/src/java/com/bigdata/util/config:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-jini/src/java/com/bigdata/util/config:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:4814-4836 /branches/bugfix-btm/bigdata-jini/src/java/com/bigdata/util/config:2594-3237 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/util/config:2574-2730 /branches/fko/bigdata-jini/src/java/com/bigdata/util/config:3150-3194 /trunk/bigdata-jini/src/java/com/bigdata/util/config:2981-3282,3368-3437,3656-4061 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-perf ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-perf:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-perf:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-perf:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-perf:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-perf:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf:4814-4836 /branches/bugfix-btm/bigdata-perf:2594-3237 /branches/dev-btm/bigdata-perf:2574-2730 /branches/fko/bigdata-perf:3150-3194 /trunk/bigdata-perf:2981-3043,3368-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf:6769-6785 /branches/BTREE_BUFFER_BRANCH/bigdata-perf:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-perf:2270-2546,2548-2782 /branches/INT64_BRANCH/bigdata-perf:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf:4175-4387 /branches/LEXICON_REFACTOR_BRANCH/bigdata-perf:2633-3304 /branches/QUADS_QUERY_BRANCH/bigdata-perf:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 
/branches/RWSTORE_1_1_0_DEBUG/bigdata-perf:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf:4814-4836 /branches/bugfix-btm/bigdata-perf:2594-3237 /branches/dev-btm/bigdata-perf:2574-2730 /branches/fko/bigdata-perf:3150-3194 /trunk/bigdata-perf:2981-3043,3368-3437,3656-4061 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/btc ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-perf/btc:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/btc:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/btc:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-perf/btc:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf/btc:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf/btc:4814-4836 /trunk/bigdata-perf/btc:2981-3043,3368-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf/btc:6769-6785 /branches/INT64_BRANCH/bigdata-perf/btc:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/btc:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/btc:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-perf/btc:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf/btc:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf/btc:4814-4836 /trunk/bigdata-perf/btc:2981-3043,3368-3437,3656-4061 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/btc/src/resources ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-perf/btc/src/resources:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/btc/src/resources:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/btc/src/resources:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-perf/btc/src/resources:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf/btc/src/resources:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf/btc/src/resources:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf/btc/src/resources:6769-6785 /branches/INT64_BRANCH/bigdata-perf/btc/src/resources:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/btc/src/resources:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/btc/src/resources:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-perf/btc/src/resources:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf/btc/src/resources:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf/btc/src/resources:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/lubm ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-perf/lubm:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/lubm:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf/lubm:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf/lubm:4814-4836 + 
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf/lubm:6769-6785 /branches/INT64_BRANCH/bigdata-perf/lubm:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/lubm:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-perf/lubm:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf/lubm:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf/lubm:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/uniprot ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-perf/uniprot:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/uniprot:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/uniprot:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-perf/uniprot:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf/uniprot:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf/uniprot:4814-4836 /trunk/bigdata-perf/uniprot:2981-3043,3368-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf/uniprot:6769-6785 /branches/INT64_BRANCH/bigdata-perf/uniprot:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/uniprot:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/uniprot:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-perf/uniprot:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf/uniprot:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf/uniprot:4814-4836 /trunk/bigdata-perf/uniprot:2981-3043,3368-3437,3656-4061 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-perf/uniprot/src ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-perf/uniprot/src:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/uniprot/src:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/uniprot/src:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-perf/uniprot/src:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf/uniprot/src:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf/uniprot/src:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-perf/uniprot/src:6769-6785 /branches/INT64_BRANCH/bigdata-perf/uniprot/src:4486-4522 /branches/JOURNAL_HA_BRANCH/bigdata-perf/uniprot/src:2596-4066 /branches/LARGE_LITERALS_REFACTOR/bigdata-perf/uniprot/src:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-perf/uniprot/src:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-perf/uniprot/src:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-perf/uniprot/src:4814-4836 Copied: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar (from rev 6785, branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar) =================================================================== (Binary files differ) Deleted: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/lib/openrdf-sesame-2.6.3-onejar.jar =================================================================== (Binary 
files differ) Copied: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/lib/sesame-rio-testsuite-2.6.10.jar (from rev 6785, branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/lib/sesame-rio-testsuite-2.6.10.jar) =================================================================== (Binary files differ) Deleted: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/lib/sesame-rio-testsuite-2.6.3.jar =================================================================== (Binary files differ) Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:6769-6785 /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate:4814-4836 Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2012-12-19 18:19:21 UTC (rev 6785) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2012-12-19 18:43:27 UTC (rev 6786) @@ -111,8 +111,8 @@ r.add(new BigdataRDFXMLParserFactory()); - // Note: This ensures that the RDFFormat for NQuads is loaded. - r.get(NQuadsParser.nquads); +// // Note: This ensures that the RDFFormat for NQuads is loaded. 
+// r.get(RDFFormat.NQUADS); r.add(new BigdataNTriplesParserFactory()); Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-rdf/src/java/com/bigdata/rdf/changesets:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-rdf/src/java/com/bigdata/rdf/changesets:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/src/java/com/bigdata/rdf/changesets:6769-6785 /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-rdf/src/java/com/bigdata/rdf/changesets:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-rdf/src/java/com/bigdata/rdf/changesets:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/error ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/error:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/error:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-rdf/src/java/com/bigdata/rdf/error:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/error:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/src/java/com/bigdata/rdf/error:6769-6785 /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/error:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/error:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-rdf/src/java/com/bigdata/rdf/error:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/error:4814-4836 Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/internal ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-rdf/src/java/com/bigdata/rdf/internal:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal:4525-4531,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796 /branches/RWSTORE_1_1_0_DEBUG/bigdata-rdf/src/java/com/bigdata/rdf/internal:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/src/java/com/bigdata/rdf/internal:6769-6785 
/branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-rdf/src/java/com/bigdata/rdf/internal:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal:4525-4531,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796 /branches/RWSTORE_1_1_0_DEBUG/bigdata-rdf/src/java/com/bigdata/rdf/internal:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal:4814-4836 Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2012-12-19 18:19:21 UTC (rev 6785) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2012-12-19 18:43:27 UTC (rev 6786) @@ -48,7 +48,6 @@ import com.bigdata.rdf.ServiceProviderHook; import com.bigdata.rdf.inf.ClosureStats; import com.bigdata.rdf.rio.RDFParserOptions; -import com.bigdata.rdf.rio.nquads.NQuadsParser; import com.bigdata.rdf.rules.InferenceEngine; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.AbstractTripleStore.Options; @@ -258,9 +257,8 @@ * When the {@link RDFFormat} of a resource is not evident, assume that * it is the format specified by this value (default * {@value #DEFAULT_RDF_FORMAT}). The value is one of the {@link String} - * values of the known {@link RDFFormat}s, including - * {@link NQuadsParser#nquads}. It may be null, in which case there is - * no default. + * values of the known {@link RDFFormat}s. It may be null, in which case + * there is no default. 
*/ String RDF_FORMAT = "rdfFormat"; Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/relation ___________________________________________________________________ Modified: svn:mergeinfo - /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/relation:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-rdf/src/java/com/bigdata/rdf/relation:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/relation:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-rdf/src/java/com/bigdata/rdf/relation:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/relation:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-rdf/src/java/com/bigdata/rdf/relation:6769-6785 /branches/INT64_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/relation:4486-4522 /branches/LARGE_LITERALS_REFACTOR/bigdata-rdf/src/java/com/bigdata/rdf/relation:4175-4387 /branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/relation:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata-rdf/src/java/com/bigdata/rdf/relation:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/relation:4814-4836 Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/nquads/NQuadsParser.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/nquads/NQuadsParser.java 2012-12-19 18:19:21 UTC (rev 6785) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/rio/nquads/NQuadsParser.java 2012-12-19 18:43:27 UTC (rev 6786) @@ -31,8 +31,6 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; -import java.nio.charset.Charset; -import java.util.Arrays; import org.apache.log4j.Logger; import org.openrdf.model.Literal; @@ -68,60 +66,60 @@ final protected transient static Logger log = Logger .getLogger(NQuadsParser.class); - /** - * The nquads RDF format. - * <p> - * The file extension <code>.nq</code> is recommended for N-Quads documents. - * The media type is <code>text/x-nquads</code> and the encoding is 7-bit - * US-ASCII. The URI that identifies the N-Quads syntax is - * <code>http://sw.deri.org/2008/07/n-quads/#n-quads</code>. - * </p> - * - * @see http://sw.deri.org/2008/07/n-quads/ - */ - static public final RDFFormat nquads; +// /** +// * The nquads RDF format. +// * <p> +// * The file extension <code>.nq</code> is recommended for N-Quads documents. +// * The media type is <code>text/x-nquads</code> and the encoding is 7-bit +// * US-ASCII. The URI that identifies the N-Quads syntax is +// * <code>http://sw.deri.org/2008/07/n-quads/#n-quads</code>. +// * </p> +// * +// * @see http://sw.deri.org/2008/07/n-quads/ +// */ +// static public final RDFFormat nquads; - /** - * Register an {@link RDFFormat} for the {@link NxParser} to handle nquads. 
- * - * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/439"> - * Class loader problems </a> - * @see <a href="http://www.openrdf.org/issues/browse/SES-802"> Please add - * support for NQuads format </a> - * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/559"> Use - * RDFFormat.NQUADS as the format identifier for the NQuads parser </a> - */ - static { - - nquads = new RDFFormat( - // format name. - "N-Quads", - // registered mime types. - Arrays.asList(new String[] {"text/x-nquads"}), // - Charset.forName("US-ASCII"), // charset - // file extensions - Arrays.asList(new String[]{"nq"}), - false, // supportsNamespaces, - true // supportsContexts - ); - - // register the nquads format. - RDFFormat.register(nquads); - -// // register the parser factory for nquads. -// RDFParserRegistry.getInstance().add(new RDFParserFactory() { +// /** +// * Register an {@link RDFFormat} for the {@link NxParser} to handle nquads. +// * +// * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/439"> +// * Class loader problems </a> +// * @see <a href="http://www.openrdf.org/issues/browse/SES-802"> Please add +// * support for NQuads format </a> +// * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/559"> Use +// * RDFFormat.NQUADS as the format identifier for the NQuads parser </a> +// */ +// static { // -// public RDFParser getParser() { -// return new NQuadsParser(); -// } +//// nquads = new RDFFormat( +//// // format name. +//// "N-Quads", +//// // registered mime types. +//// Arrays.asList(new String[] {"text/x-nquads"}), // +//// Charset.forName("US-ASCII"), // charset +//// // file extensions +//// Arrays.asList(new String[]{"nq"}), +//// false, // supportsNamespaces, +//// true // supportsContexts +//// ); +//// +//// // register the nquads format. +//// RDFFormat.register(nquads); // -// public RDFFormat getRDFFormat() { -// return nquads; -// } -// -// }); - - } +//// // register the parser factory for nquads. +//// RDFParserRegistry.getInstance().add(new RDFParserFactory() { +//// +//// public RDFParser getParser() { +//// return new NQuadsParser(); +//// } +//// +//// public RDFFormat getRDFFormat() { +//// return nquads; +//// } +//// +//// }); +// +// } private ValueFactory valueFactory = new ValueFactoryImpl(); @@ -140,7 +138,7 @@ public RDFFormat getRDFFormat() { - return nquads; + ... [truncated message content] |
From: <tho...@us...> - 2012-12-22 18:14:13
|
Revision: 6799
http://bigdata.svn.sourceforge.net/bigdata/?rev=6799&view=rev
Author: thompsonbry
Date: 2012-12-22 18:14:00 +0000 (Sat, 22 Dec 2012)

Log Message:
-----------

Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/update/ParseOp.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/properties/PropertiesFormat.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteSparql10QueryBuilder.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteSparql11QueryBuilder.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IRemoteRepository.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll.java
branches/BIGDATA_RELEASE_1_2_0/build.xml

Added Paths:
-----------
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/AST2SPARQLUtil.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/EncodeDecodeValue.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IMimeTypes.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/MiniMime.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/client/
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/client/TestAll.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/client/TestEncodeDecodeValue.java

Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AST2SPARQLUtil.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/EncodeDecodeValue.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MiniMime.java
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestEncodeDecodeValue.java

Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/update/ParseOp.java
=================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/update/ParseOp.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/update/ParseOp.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -75,7 +75,7 @@ import com.bigdata.rdf.rio.PresortRioLoader; import com.bigdata.rdf.rio.RDFParserOptions; import com.bigdata.rdf.rio.StatementBuffer; -import com.bigdata.rdf.sail.webapp.MiniMime; +import com.bigdata.rdf.sail.webapp.client.MiniMime; import com.bigdata.rdf.sparql.ast.LoadGraph; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.DataLoader; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -37,7 +37,6 @@ import org.openrdf.rio.RDFWriterRegistry; import com.bigdata.rdf.model.StatementEnum; -import com.bigdata.rdf.rio.nquads.NQuadsParser; import com.bigdata.rdf.rio.ntriples.BigdataNTriplesParserFactory; import com.bigdata.rdf.rio.rdfxml.BigdataRDFXMLParserFactory; import com.bigdata.rdf.rio.rdfxml.BigdataRDFXMLWriterFactory; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/properties/PropertiesFormat.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/properties/PropertiesFormat.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/properties/PropertiesFormat.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -65,10 +65,9 @@ import java.util.Iterator; import java.util.concurrent.CopyOnWriteArraySet; -import org.openrdf.rio.RDFFormat; - /** * Formats for a properties file. + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ public class PropertiesFormat extends FileFormat implements Iterable<PropertiesFormat> { Deleted: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AST2SPARQLUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AST2SPARQLUtil.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AST2SPARQLUtil.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -1,174 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Mar 5, 2012 - */ - -package com.bigdata.rdf.sparql.ast; - -import java.util.HashMap; -import java.util.Map; - -import org.openrdf.model.BNode; -import org.openrdf.model.Literal; -import org.openrdf.model.URI; -import org.openrdf.model.Value; - -/** - * Utility class for externalizing {@link TermNode}s. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public class AST2SPARQLUtil { - - /** - * The prefix declarations used within the SERVICE clause (from the original - * query). - */ - private final Map<String, String> prefixDecls; - - /** Reverse map for {@link #prefixDecls}. */ - private final Map<String, String> namespaces; - - public AST2SPARQLUtil(final Map<String, String> prefixDecls) { - - this.prefixDecls = prefixDecls; - - if (prefixDecls != null) { - - /* - * Build up a reverse map from namespace to prefix. - */ - - namespaces = new HashMap<String, String>(); - - for (Map.Entry<String, String> e : prefixDecls.entrySet()) { - - namespaces.put(e.getValue(), e.getKey()); - - } - - } else { - - namespaces = null; - - } - - } - - /** - * Return an external form for the {@link Value} suitable for direct - * embedding into a SPARQL query. - * - * @param val - * The value. - * - * @return The external form. - */ - public String toExternal(final Value val) { - - if (val instanceof URI) { - - return toExternal((URI) val); - - } else if (val instanceof Literal) { - - return toExternal((Literal)val); - - } else if (val instanceof BNode) { - - return toExternal((BNode)val); - - } else { - - throw new AssertionError(); - - } - - } - - public String toExternal(final BNode bnd) { - - final String id = bnd.stringValue(); - -// final boolean isLetter = Character.isLetter(id.charAt(0)); - -// return "_:" + (isLetter ? "" : "B") + id; - return "_:B" + id; - - } - - public String toExternal(final URI uri) { - - if (prefixDecls != null) { - - final String prefix = namespaces.get(uri.getNamespace()); - - if (prefix != null) { - - return prefix + ":" + uri.getLocalName(); - - } - - } - - return "<" + uri.stringValue() + ">"; - - } - - public String toExternal(final Literal lit) { - - final String label = lit.getLabel(); - - final String languageCode = lit.getLanguage(); - - final URI datatypeURI = lit.getDatatype(); - - final String datatypeStr = datatypeURI == null ? null - : toExternal(datatypeURI); - - final StringBuilder sb = new StringBuilder((label.length() + 2) - + (languageCode != null ? (languageCode.length() + 1) : 0) - + (datatypeURI != null ? 
datatypeStr.length() + 2 : 0)); - - sb.append('"'); - sb.append(label); - sb.append('"'); - - if (languageCode != null) { - sb.append('@'); - sb.append(languageCode); - } - - if (datatypeURI != null) { - sb.append("^^"); - sb.append(datatypeStr); - } - - return sb.toString(); - - } - -} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -398,15 +398,15 @@ */ String SOLUTION_SET_CACHE = "solutionSetCache"; - boolean DEFAULT_SOLUTION_SET_CACHE = false; + boolean DEFAULT_SOLUTION_SET_CACHE = true; /** * When <code>true</code> a DESCRIBE cache will be maintained. This can - * DESCRIBE queries, linked data queries (which are mapped to a DESCRIBE - * query by the NSS), and potentially accelerate star-joins (if the query - * plan is rewritten to hit the DESCRIBE cache and obtain the materialized - * joins from it, but this is best done with a fully materialized and - * synchronously maintained DESCRIBE cache). + * accelerate DESCRIBE queries, linked data queries (which are mapped to a + * DESCRIBE query by the NSS), and potentially accelerate star-joins (if the + * query plan is rewritten to hit the DESCRIBE cache and obtain the + * materialized joins from it, but this is best done with a fully + * materialized and synchronously maintained DESCRIBE cache). * * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/584"> * DESCRIBE CACHE </a> Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -96,7 +96,7 @@ import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.SPARQLUpdateEvent; import com.bigdata.rdf.sail.Sesame2BigdataIterator; -import com.bigdata.rdf.sail.webapp.MiniMime; +import com.bigdata.rdf.sail.webapp.client.MiniMime; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.AbstractGraphDataUpdate; import com.bigdata.rdf.sparql.ast.AddGraph; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteSparql10QueryBuilder.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteSparql10QueryBuilder.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteSparql10QueryBuilder.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -40,7 +40,7 @@ import org.openrdf.query.impl.EmptyBindingSet; import com.bigdata.bop.IVariable; -import com.bigdata.rdf.sparql.ast.AST2SPARQLUtil; +import com.bigdata.rdf.sail.webapp.client.AST2SPARQLUtil; /** * Utility class constructs a valid SPARQL query for a remote Modified: 
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteSparql11QueryBuilder.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteSparql11QueryBuilder.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteSparql11QueryBuilder.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -39,7 +39,7 @@ import org.openrdf.query.BindingSet; import com.bigdata.bop.IVariable; -import com.bigdata.rdf.sparql.ast.AST2SPARQLUtil; +import com.bigdata.rdf.sail.webapp.client.AST2SPARQLUtil; /** * Utility class constructs a valid SPARQL query for a remote Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -58,6 +58,7 @@ import com.bigdata.rdf.properties.PropertiesWriterRegistry; import com.bigdata.rdf.rules.ConstraintViolationException; import com.bigdata.rdf.sail.webapp.XMLBuilder.Node; +import com.bigdata.rdf.sail.webapp.client.EncodeDecodeValue; import com.bigdata.util.InnerCause; /** Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -42,12 +42,13 @@ import com.bigdata.journal.IIndexManager; import com.bigdata.journal.Journal; import com.bigdata.quorum.Quorum; +import com.bigdata.rdf.sail.webapp.client.IMimeTypes; /** * Useful glue for implementing service actions, but does not directly implement * any service action/ */ -abstract public class BigdataServlet extends HttpServlet { +abstract public class BigdataServlet extends HttpServlet implements IMimeTypes { /** * @@ -93,25 +94,6 @@ HTTP_INTERNALERROR = HttpServletResponse.SC_INTERNAL_SERVER_ERROR, HTTP_NOTIMPLEMENTED = HttpServletResponse.SC_NOT_IMPLEMENTED; - /** - * Common MIME types for dynamic content. - */ - public static final transient String - MIME_TEXT_PLAIN = "text/plain", - MIME_TEXT_HTML = "text/html", -// MIME_TEXT_XML = "text/xml", - /** - * General purpose binary <code>application/octet-stream</code>. - */ - MIME_DEFAULT_BINARY = "application/octet-stream", - MIME_APPLICATION_XML = "application/xml", - MIME_TEXT_JAVASCRIPT = "text/javascript", - /** - * The traditional encoding of URL query parameters within a POST - * message body. 
- */ - MIME_APPLICATION_URL_ENCODED = "application/x-www-form-urlencoded"; - protected <T> T getRequiredServletContextAttribute(final String name) { @SuppressWarnings("unchecked") Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -39,6 +39,7 @@ import org.openrdf.rio.RDFFormat; import com.bigdata.rdf.properties.PropertiesFormat; +import com.bigdata.rdf.sail.webapp.client.MiniMime; /** Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -49,6 +49,8 @@ import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; +import com.bigdata.rdf.sail.webapp.client.EncodeDecodeValue; +import com.bigdata.rdf.sail.webapp.client.MiniMime; /** * Handler for DELETE by query (DELETE verb) and DELETE by data (POST). Deleted: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/EncodeDecodeValue.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/EncodeDecodeValue.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/EncodeDecodeValue.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -1,445 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Oct 13, 2011 - */ - -package com.bigdata.rdf.sail.webapp; - -import org.openrdf.model.BNode; -import org.openrdf.model.Literal; -import org.openrdf.model.Resource; -import org.openrdf.model.URI; -import org.openrdf.model.Value; -import org.openrdf.model.impl.LiteralImpl; -import org.openrdf.model.impl.URIImpl; - -/** - * Utility class to encode/decode RDF {@link Value}s for interchange with the - * REST API. 
- * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public class EncodeDecodeValue { - -// /* -// * Note: The decode logic was derived from the JavaCharStream file generated -// * by JavaCC. -// */ -// -// private static final int hexval(char c) { -// switch (c) { -// case '0': -// return 0; -// case '1': -// return 1; -// case '2': -// return 2; -// case '3': -// return 3; -// case '4': -// return 4; -// case '5': -// return 5; -// case '6': -// return 6; -// case '7': -// return 7; -// case '8': -// return 8; -// case '9': -// return 9; -// -// case 'a': -// case 'A': -// return 10; -// case 'b': -// case 'B': -// return 11; -// case 'c': -// case 'C': -// return 12; -// case 'd': -// case 'D': -// return 13; -// case 'e': -// case 'E': -// return 14; -// case 'f': -// case 'F': -// return 15; -// } -// -// throw new AssertionError(); -// } -// -// private static class DecodeString { -// private final StringBuilder sb = new StringBuilder(); -// private final String src; -// private int srcpos = 0; -// DecodeString(final String s) { -// this.src = s; -// } -// -// private char ReadByte() { -// return src.charAt(srcpos++); -// } -// -// private void backup(final int n) { -// -// sb.setLength(sb.length() - n); -// -// } -// -// /** -// * Read a character. -// * -// * TODO Does not handle the 8 character escape code sequences (but -// * neither does the SPARQL parser!) -// */ -// private char readChar() throws java.io.IOException { -// char c; -// -// sb.append(c = ReadByte()); -// -// if (c == '\\') { -// -// int backSlashCnt = 1; -// -// for (;;) // Read all the backslashes -// { -// -// try { -// sb.append(c=ReadByte()); -// if (c != '\\') { -// // found a non-backslash char. -// if ((c == 'u') && ((backSlashCnt & 1) == 1)) { -// if (--bufpos < 0) -// bufpos = bufsize - 1; -// -// break; -// } -// -// backup(backSlashCnt); -// return '\\'; -// } -// } catch (java.io.IOException e) { -// // We are returning one backslash so we should only -// // backup (count-1) -// if (backSlashCnt > 1) -// backup(backSlashCnt - 1); -// -// return '\\'; -// } -// -// backSlashCnt++; -// } -// -// // Here, we have seen an odd number of backslash's followed by a -// // 'u' -// try { -// while ((c = ReadByte()) == 'u') {} -// -// // Decode the code sequence. -// c = (char) (hexval(c) << 12 | hexval(ReadByte()) << 8 -// | hexval(ReadByte()) << 4 | hexval(ReadByte())); -// -// sb.append(c); -// -// } catch (java.io.IOException e) { -// -// throw new Error("Invalid escape character"); -// -// } -// -// if (backSlashCnt == 1) -// return c; -// else { -// backup(backSlashCnt - 1); -// return '\\'; -// } -// } else { -// return c; -// } -// } -// -// } -// -// /** -// * Apply code point escape sequences for anything that we need to escape. -// * For our purposes, this is just <code>"</code> and <code>></code>. -// * @param s -// * @return -// * -// * @see http://www.w3.org/TR/sparql11-query/#codepointEscape -// */ -// static String encodeEscapeSequences(final String s) { -// -// return s; -// -// } -// -// /** -// * Decode all code point escape sequences. Note that we need to decode more -// * than we encode since we are not responsible for the encoding when it -// * comes to the REST API, just the decoding. -// * -// * @param s -// * The string, which may have escape sequences encoded. -// * -// * @return The string with escape sequences decoded. -// * -// * @throws IllegalArgumentException -// * if the argument is <code>null</code>. 
-// * @throws IllegalArgumentException -// * if the argument is contains an ill-formed escape code -// * sequence. -// * -// * @see http://www.w3.org/TR/sparql11-query/#codepointEscape -// * -// * FIXME Implement encode/decode. -// */ -// static String decodeEscapeSequences(final String s) { -// -//// // Remove any escape sequences. -//// final StringBuilder sb = new StringBuilder(); -//// for (int i = 0; i < slen; i++) { -//// char ch = s.charAt(i); -//// if (ch == '\\') { -//// if (i + 1 == slen) -//// throw new IllegalArgumentException(s); -//// ch = s.charAt(i); -//// } -//// sb.append(ch); -//// } -//// final String t = sb.toString(); -// -// return s; -// -// } - - /** - * Decode a URI or Literal. - * - * @param s - * The value to be decoded. - * - * @return The URI or literal -or- <code>null</code> if the argument was - * <code>null</code>. - * - * @throws IllegalArgumentException - * if the request parameter could not be decoded as an RDF - * {@link Value}. - */ - public static Value decodeValue(final String s) { - - if(s == null) - return null; - -// final String s = decodeEscapeSequences(ss); - - final int slen = s.length(); - - if (slen == 0) - throw new IllegalArgumentException("<Empty String>"); - - final char ch = s.charAt(0); - - if(ch == '\"' || ch == '\'') { - - /* - * Literal. - */ - - final int closeQuotePos = s.lastIndexOf(ch); - - if (closeQuotePos == 0) - throw new IllegalArgumentException(s); - - final String label = s.substring(1, closeQuotePos); - - if (slen == closeQuotePos + 1) { - - /* - * Plain literal. - */ - - return new LiteralImpl(label); - - } - - final char ch2 = s.charAt(closeQuotePos + 1); - - if (ch2 == '@') { - - /* - * Language code literal. - */ - - final String languageCode = s.substring(closeQuotePos + 2); - - return new LiteralImpl(label, languageCode); - - } else if (ch2 == '^') { - - /* - * Datatype literal. - */ - - if (slen <= closeQuotePos + 2) - throw new IllegalArgumentException(s); - - if (s.charAt(closeQuotePos + 2) != '^') - throw new IllegalArgumentException(s); - - final String datatypeStr = s.substring(closeQuotePos + 3); - - final URI datatypeURI = decodeURI(datatypeStr); - - return new LiteralImpl(label,datatypeURI); - - } else { - - throw new IllegalArgumentException(s); - - } - - } else if (ch == '<') { - - /* - * URI - */ - - if (s.charAt(slen - 1) != '>') - throw new IllegalArgumentException(s); - - final String uriStr = s.substring(1, slen - 1); - - return new URIImpl(uriStr); - - } else { - - throw new IllegalArgumentException(s); - - } - - } - - /** - * Type safe variant for a {@link Resource}. - */ - public static Resource decodeResource(final String param) { - - final Value v = decodeValue(param); - - if (v == null || v instanceof Resource) - return (Resource) v; - - throw new IllegalArgumentException("Not a Resource: '" + param + "'"); - - } - - /** - * Type safe variant for a {@link URI}. - */ - public static URI decodeURI(final String param) { - - final Value v = decodeValue(param); - - if (v == null || v instanceof URI) - return (URI) v; - - throw new IllegalArgumentException("Not an URI: '" + param + "'"); - - } - - /** - * Encode an RDF {@link Value} as it should appear if used in a SPARQL - * query. E.g., a literal will look like <code>"abc"</code>, - * <code>"abc"@en</code> or - * <code>"3"^^xsd:int. A URI will look like <code><http://www.bigdata.com/></code> - * . - * - * @param v - * The value (optional). - * - * @return The encoded value -or- <code>null</code> if the argument is - * <code>null</code>. 
- * - * @throws IllegalArgumentException - * if the argument is a {@link BNode}. - */ - public static String encodeValue(final Value v) { - if(v == null) - return null; - if (v instanceof BNode) - throw new IllegalArgumentException(); - if (v instanceof URI) { - return "<" + v.stringValue() + ">"; - } - if (v instanceof Literal) { - final Literal lit = (Literal) v; - final StringBuilder sb = new StringBuilder(); - sb.append("\""); - sb.append(lit.getLabel()); - sb.append("\""); - if (lit.getLanguage() != null) { - sb.append("@"); - sb.append(lit.getLanguage()); - } - if (lit.getDatatype() != null) { - sb.append("^^"); - sb.append(encodeValue(lit.getDatatype())); - } - return sb.toString(); - } - throw new AssertionError(); - } - - public static Resource[] decodeResources(final String[] strings) { - if (strings == null || strings.length == 0) - return null; - - final Resource[] resources = new Resource[strings.length]; - for (int i = 0; i < strings.length; i++) { - resources[i] = decodeResource(strings[i]); - } - - return resources; - } - - public static String[] encodeValues(final Value[] values) { - if (values == null || values.length == 0) - return null; - - final String[] strings = new String[values.length]; - for (int i = 0; i < values.length; i++) { - strings[i] = encodeValue(values[i]); - } - - return strings; - } - -} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -46,6 +46,8 @@ import org.openrdf.sail.SailException; import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; +import com.bigdata.rdf.sail.webapp.client.EncodeDecodeValue; +import com.bigdata.rdf.sail.webapp.client.MiniMime; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; /** Deleted: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MiniMime.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MiniMime.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MiniMime.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -1,76 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Mar 7, 2012 - */ - -package com.bigdata.rdf.sail.webapp; - -import org.apache.log4j.Logger; - -/** - * Extract and return the quality score for the mime type (defaults to - * <code>1.0</code>). - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * - * TODO Lift out patterns. - */ -public class MiniMime { - - static private final Logger log = Logger.getLogger(MiniMime.class); - - public final float q; - - private final String mimeType; - - public final String[][] params; - - public MiniMime(final String s) { - final String[] b = s.split(";"); - mimeType = b[0]; - float q = 1f; - params = new String[b.length][]; - for (int i = 1; i < b.length; i++) { - final String c = b[i]; - final String[] d = c.split("="); - if (d.length < 2) - continue; - params[i] = d; - // params[i][0] = d[0]; - // params[i][1] = d[1]; - if (!d[0].equals("q")) - continue; - q = Float.valueOf(d[1]); - } - if (log.isDebugEnabled()) - log.debug("Considering: " + s + " :: q=" + q); - this.q = q; - } - - public String getMimeType() { - return mimeType; - } -} \ No newline at end of file Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -68,6 +68,7 @@ import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.UpdateTask; +import com.bigdata.rdf.sail.webapp.client.EncodeDecodeValue; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.QueryRoot; import com.bigdata.rdf.store.AbstractTripleStore; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -50,6 +50,8 @@ import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; import com.bigdata.rdf.sail.webapp.DeleteServlet.RemoveStatementHandler; import com.bigdata.rdf.sail.webapp.InsertServlet.AddStatementHandler; +import com.bigdata.rdf.sail.webapp.client.EncodeDecodeValue; +import com.bigdata.rdf.sail.webapp.client.MiniMime; /** * Handler for NanoSparqlServer REST API UPDATE operations (PUT, not SPARQL Copied: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/AST2SPARQLUtil.java (from rev 6786, branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AST2SPARQLUtil.java) =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/AST2SPARQLUtil.java (rev 0) +++ 
branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/AST2SPARQLUtil.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -0,0 +1,174 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Mar 5, 2012 + */ + +package com.bigdata.rdf.sail.webapp.client; + +import java.util.HashMap; +import java.util.Map; + +import org.openrdf.model.BNode; +import org.openrdf.model.Literal; +import org.openrdf.model.URI; +import org.openrdf.model.Value; + +/** + * Utility class for externalizing SPARQL prefix declaration management. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class AST2SPARQLUtil { + + /** + * The prefix declarations used within the SERVICE clause (from the original + * query). + */ + private final Map<String, String> prefixDecls; + + /** Reverse map for {@link #prefixDecls}. */ + private final Map<String, String> namespaces; + + public AST2SPARQLUtil(final Map<String, String> prefixDecls) { + + this.prefixDecls = prefixDecls; + + if (prefixDecls != null) { + + /* + * Build up a reverse map from namespace to prefix. + */ + + namespaces = new HashMap<String, String>(); + + for (Map.Entry<String, String> e : prefixDecls.entrySet()) { + + namespaces.put(e.getValue(), e.getKey()); + + } + + } else { + + namespaces = null; + + } + + } + + /** + * Return an external form for the {@link Value} suitable for direct + * embedding into a SPARQL query. + * + * @param val + * The value. + * + * @return The external form. + */ + public String toExternal(final Value val) { + + if (val instanceof URI) { + + return toExternal((URI) val); + + } else if (val instanceof Literal) { + + return toExternal((Literal)val); + + } else if (val instanceof BNode) { + + return toExternal((BNode)val); + + } else { + + throw new AssertionError(); + + } + + } + + public String toExternal(final BNode bnd) { + + final String id = bnd.stringValue(); + +// final boolean isLetter = Character.isLetter(id.charAt(0)); + +// return "_:" + (isLetter ? "" : "B") + id; + return "_:B" + id; + + } + + public String toExternal(final URI uri) { + + if (prefixDecls != null) { + + final String prefix = namespaces.get(uri.getNamespace()); + + if (prefix != null) { + + return prefix + ":" + uri.getLocalName(); + + } + + } + + return "<" + uri.stringValue() + ">"; + + } + + public String toExternal(final Literal lit) { + + final String label = lit.getLabel(); + + final String languageCode = lit.getLanguage(); + + final URI datatypeURI = lit.getDatatype(); + + final String datatypeStr = datatypeURI == null ? null + : toExternal(datatypeURI); + + final StringBuilder sb = new StringBuilder((label.length() + 2) + + (languageCode != null ? (languageCode.length() + 1) : 0) + + (datatypeURI != null ? 
datatypeStr.length() + 2 : 0)); + + sb.append('"'); + sb.append(label); + sb.append('"'); + + if (languageCode != null) { + sb.append('@'); + sb.append(languageCode); + } + + if (datatypeURI != null) { + sb.append("^^"); + sb.append(datatypeStr); + } + + return sb.toString(); + + } + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java 2012-12-22 16:45:25 UTC (rev 6798) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -30,7 +30,6 @@ import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.util.ArrayList; -import java.util.Arrays; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -44,7 +43,7 @@ import org.openrdf.query.resultio.TupleQueryResultFormat; import org.openrdf.rio.RDFFormat; -import com.bigdata.rdf.ServiceProviderHook; +//import com.bigdata.rdf.ServiceProviderHook; /** * Options for the HTTP connection. @@ -89,7 +88,18 @@ static { - ServiceProviderHook.forceLoad(); + /** + * Note: This has been commented out. If it is included, then a lot of + * the total code base gets dragged into the bigdata-client JAR. If this + * creates a problem for clients, then we will need to examine the + * bigdata RDF model and bigdata RDF parser packages carefully and + * relayer them in order to decouple them from the rest of the code + * base. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/628" > + * Create a bigdata-client jar for the NSS REST API </a> + */ +// ServiceProviderHook.forceLoad(); /* * FIXME We really need to know whether we are talking to a triple or Copied: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/EncodeDecodeValue.java (from rev 6786, branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/EncodeDecodeValue.java) =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/EncodeDecodeValue.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/EncodeDecodeValue.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -0,0 +1,445 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Oct 13, 2011 + */ + +package com.bigdata.rdf.sail.webapp.client; + +import org.openrdf.model.BNode; +import org.openrdf.model.Literal; +import org.openrdf.model.Resource; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.model.impl.LiteralImpl; +import org.openrdf.model.impl.URIImpl; + +/** + * Utility class to encode/decode RDF {@link Value}s for interchange with the + * REST API. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class EncodeDecodeValue { + +// /* +// * Note: The decode logic was derived from the JavaCharStream file generated +// * by JavaCC. +// */ +// +// private static final int hexval(char c) { +// switch (c) { +// case '0': +// return 0; +// case '1': +// return 1; +// case '2': +// return 2; +// case '3': +// return 3; +// case '4': +// return 4; +// case '5': +// return 5; +// case '6': +// return 6; +// case '7': +// return 7; +// case '8': +// return 8; +// case '9': +// return 9; +// +// case 'a': +// case 'A': +// return 10; +// case 'b': +// case 'B': +// return 11; +// case 'c': +// case 'C': +// return 12; +// case 'd': +// case 'D': +// return 13; +// case 'e': +// case 'E': +// return 14; +// case 'f': +// case 'F': +// return 15; +// } +// +// throw new AssertionError(); +// } +// +// private static class DecodeString { +// private final StringBuilder sb = new StringBuilder(); +// private final String src; +// private int srcpos = 0; +// DecodeString(final String s) { +// this.src = s; +// } +// +// private char ReadByte() { +// return src.charAt(srcpos++); +// } +// +// private void backup(final int n) { +// +// sb.setLength(sb.length() - n); +// +// } +// +// /** +// * Read a character. +// * +// * TODO Does not handle the 8 character escape code sequences (but +// * neither does the SPARQL parser!) +// */ +// private char readChar() throws java.io.IOException { +// char c; +// +// sb.append(c = ReadByte()); +// +// if (c == '\\') { +// +// int backSlashCnt = 1; +// +// for (;;) // Read all the backslashes +// { +// +// try { +// sb.append(c=ReadByte()); +// if (c != '\\') { +// // found a non-backslash char. +// if ((c == 'u') && ((backSlashCnt & 1) == 1)) { +// if (--bufpos < 0) +// bufpos = bufsize - 1; +// +// break; +// } +// +// backup(backSlashCnt); +// return '\\'; +// } +// } catch (java.io.IOException e) { +// // We are returning one backslash so we should only +// // backup (count-1) +// if (backSlashCnt > 1) +// backup(backSlashCnt - 1); +// +// return '\\'; +// } +// +// backSlashCnt++; +// } +// +// // Here, we have seen an odd number of backslash's followed by a +// // 'u' +// try { +// while ((c = ReadByte()) == 'u') {} +// +// // Decode the code sequence. +// c = (char) (hexval(c) << 12 | hexval(ReadByte()) << 8 +// | hexval(ReadByte()) << 4 | hexval(ReadByte())); +// +// sb.append(c); +// +// } catch (java.io.IOException e) { +// +// throw new Error("Invalid escape character"); +// +// } +// +// if (backSlashCnt == 1) +// return c; +// else { +// backup(backSlashCnt - 1); +// return '\\'; +// } +// } else { +// return c; +// } +// } +// +// } +// +// /** +// * Apply code point escape sequences for anything that we need to escape. +// * For our purposes, this is just <code>"</code> and <code>></code>. 
+// * @param s +// * @return +// * +// * @see http://www.w3.org/TR/sparql11-query/#codepointEscape +// */ +// static String encodeEscapeSequences(final String s) { +// +// return s; +// +// } +// +// /** +// * Decode all code point escape sequences. Note that we need to decode more +// * than we encode since we are not responsible for the encoding when it +// * comes to the REST API, just the decoding. +// * +// * @param s +// * The string, which may have escape sequences encoded. +// * +// * @return The string with escape sequences decoded. +// * +// * @throws IllegalArgumentException +// * if the argument is <code>null</code>. +// * @throws IllegalArgumentException +// * if the argument is contains an ill-formed escape code +// * sequence. +// * +// * @see http://www.w3.org/TR/sparql11-query/#codepointEscape +// * +// * FIXME Implement encode/decode. +// */ +// static String decodeEscapeSequences(final String s) { +// +//// // Remove any escape sequences. +//// final StringBuilder sb = new StringBuilder(); +//// for (int i = 0; i < slen; i++) { +//// char ch = s.charAt(i); +//// if (ch == '\\') { +//// if (i + 1 == slen) +//// throw new IllegalArgumentException(s); +//// ch = s.charAt(i); +//// } +//// sb.append(ch); +//// } +//// final String t = sb.toString(); +// +// return s; +// +// } + + /** + * Decode a URI or Literal. + * + * @param s + * The value to be decoded. + * + * @return The URI or literal -or- <code>null</code> if the argument was + * <code>null</code>. + * + * @throws IllegalArgumentException + * if the request parameter could not be decoded as an RDF + * {@link Value}. + */ + public static Value decodeValue(final String s) { + + if(s == null) + return null; + +// final String s = decodeEscapeSequences(ss); + + final int slen = s.length(); + + if (slen == 0) + throw new IllegalArgumentException("<Empty String>"); + + final char ch = s.charAt(0); + + if(ch == '\"' || ch == '\'') { + + /* + * Literal. + */ + + final int closeQuotePos = s.lastIndexOf(ch); + + if (closeQuotePos == 0) + throw new IllegalArgumentException(s); + + final String label = s.substring(1, closeQuotePos); + + if (slen == closeQuotePos + 1) { + + /* + * Plain literal. + */ + + return new LiteralImpl(label); + + } + + final char ch2 = s.charAt(closeQuotePos + 1); + + if (ch2 == '@') { + + /* + * Language code literal. + */ + + final String languageCode = s.substring(closeQuotePos + 2); + + return new LiteralImpl(label, languageCode); + + } else if (ch2 == '^') { + + /* + * Datatype literal. + */ + + if (slen <= closeQuotePos + 2) + throw new IllegalArgumentException(s); + + if (s.charAt(closeQuotePos + 2) != '^') + throw new IllegalArgumentException(s); + + final String datatypeStr = s.substring(closeQuotePos + 3); + + final URI datatypeURI = decodeURI(datatypeStr); + + return new LiteralImpl(label,datatypeURI); + + } else { + + throw new IllegalArgumentException(s); + + } + + } else if (ch == '<') { + + /* + * URI + */ + + if (s.charAt(slen - 1) != '>') + throw new IllegalArgumentException(s); + + final String uriStr = s.substring(1, slen - 1); + + return new URIImpl(uriStr); + + } else { + + throw new IllegalArgumentException(s); + + } + + } + + /** + * Type safe variant for a {@link Resource}. + */ + public static Resource decodeResource(final String param) { + + final Value v = decodeValue(param); + + if (v == null || v instanceof Resource) + return (Resource) v; + + throw new IllegalArgumentException("Not a Resource: '" + param + "'"); + + } + + /** + * Type safe variant for a {@link URI}. 
+ */ + public static URI decodeURI(final String param) { + + final Value v = decodeValue(param); + + if (v == null || v instanceof URI) + return (URI) v; + + throw new IllegalArgumentException("Not an URI: '" + param + "'"); + + } + + /** + * Encode an RDF {@link Value} as it should appear if used in a SPARQL + * query. E.g., a literal will look like <code>"abc"</code>, + * <code>"abc"@en</code> or + * <code>"3"^^xsd:int. A URI will look like <code><http://www.bigdata.com/></code> + * . + * + * @param v + * The value (optional). + * + * @return The encoded value -or- <code>null</code> if the argument is + * <code>null</code>. + * + * @throws IllegalArgumentException + * if the argument is a {@link BNode}. + */ + public static String encodeValue(final Value v) { + if(v == null) + return null; + if (v instanceof BNode) + throw new IllegalArgumentException(); + if (v instanceof URI) { + return "<" + v.stringValue() + ">"; + } + if (v instanceof Literal) { + final Literal lit = (Literal) v; + final StringBuilder sb = new StringBuilder(); + sb.append("\""); + sb.append(lit.getLabel()); + sb.append("\""); + if (lit.getLanguage() != null) { + sb.append("@"); + sb.append(lit.getLanguage()); + } + if (lit.getDatatype() != null) { + sb.append("^^"); + sb.append(encodeValue(lit.getDatatype())); + } + return sb.toString(); + } + throw new AssertionError(); + } + + public static Resource[] decodeResources(final String[] strings) { + if (strings == null || strings.length == 0) + return null; + + final Resource[] resources = new Resource[strings.length]; + for (int i = 0; i < strings.length; i++) { + resources[i] = decodeResource(strings[i]); + } + + return resources; + } + + public static String[] encodeValues(final Value[] values) { + if (values == null || values.length == 0) + return null; + + final String[] strings = new String[values.length]; + for (int i = 0; i < values.length; i++) { + strings[i] = encodeValue(values[i]); + } + + return strings; + } + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IMimeTypes.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IMimeTypes.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IMimeTypes.java 2012-12-22 18:14:00 UTC (rev 6799) @@ -0,0 +1,46 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sail.webapp.client; + +/** + * Common MIME types for dynamic content. + */ +public interface IMimeTypes { + + public String + MIME_TEXT_PLAIN = "text/plain", + MIME_TEXT_HTML = "text/html", +// MIME_TEXT_XML = "text/xml", + /** + * General purpose binary <code>application/octet-stream</code>. 
+ */ + MIME_DEFAULT_BINARY = "application/octet-stream", + MIME_APPLICATION_XML = "application/xml", + MIME_TEXT_JAVASCRIPT = "text/javascript", + /** + * The traditional encoding of URL query parameters within a POST + * message body. + */ + MIME_APPLICATION_URL_ENCODED = "application/x-www-form-urlencoded"; + +} Modified: b... [truncated message content] |
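For client code picking up the relocated com.bigdata.rdf.sail.webapp.client.EncodeDecodeValue, here is a minimal round-trip sketch based on the methods shown in the diff above. The example class name and the literal/URI values are illustrative only, and the REST client is assumed to URL-encode the returned strings separately before placing them into request parameters.

import org.openrdf.model.Literal;
import org.openrdf.model.URI;
import org.openrdf.model.Value;
import org.openrdf.model.impl.LiteralImpl;
import org.openrdf.model.impl.URIImpl;

import com.bigdata.rdf.sail.webapp.client.EncodeDecodeValue;

public class EncodeDecodeValueExample {

    public static void main(final String[] args) {

        // A datatyped literal: "3"^^<http://www.w3.org/2001/XMLSchema#int>
        final URI xsdInt = new URIImpl("http://www.w3.org/2001/XMLSchema#int");
        final Literal lit = new LiteralImpl("3", xsdInt);

        // Encode for interchange with the REST API.
        final String encoded = EncodeDecodeValue.encodeValue(lit);
        System.out.println(encoded); // "3"^^<http://www.w3.org/2001/XMLSchema#int>

        // Decode on the receiving side; round-trips to an equivalent Value.
        final Value decoded = EncodeDecodeValue.decodeValue(encoded);
        System.out.println(decoded.equals(lit)); // true

        // URIs are wrapped in angle brackets; blank nodes are rejected
        // (encodeValue throws IllegalArgumentException for a BNode).
        System.out.println(EncodeDecodeValue.encodeValue(
                new URIImpl("http://www.bigdata.com/"))); // <http://www.bigdata.com/>
    }
}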
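The copied AST2SPARQLUtil externalizes Values using the prefix declarations handed to its constructor. A small sketch of the expected behaviour follows; the prefix mapping and the values are made up for illustration.

import java.util.HashMap;
import java.util.Map;

import org.openrdf.model.impl.LiteralImpl;
import org.openrdf.model.impl.URIImpl;

import com.bigdata.rdf.sail.webapp.client.AST2SPARQLUtil;

public class AST2SPARQLUtilExample {

    public static void main(final String[] args) {

        // Prefix declarations from the original query.
        final Map<String, String> prefixDecls = new HashMap<String, String>();
        prefixDecls.put("foaf", "http://xmlns.com/foaf/0.1/");

        final AST2SPARQLUtil util = new AST2SPARQLUtil(prefixDecls);

        // Known namespace: rendered as a prefixed name.
        System.out.println(util.toExternal(
                new URIImpl("http://xmlns.com/foaf/0.1/name"))); // foaf:name

        // Unknown namespace: rendered with angle brackets.
        System.out.println(util.toExternal(
                new URIImpl("http://example.org/x"))); // <http://example.org/x>

        // Language-tagged literal.
        System.out.println(util.toExternal(
                new LiteralImpl("Bryan", "en"))); // "Bryan"@en
    }
}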
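MiniMime has been deleted from com.bigdata.rdf.sail.webapp, and the new imports in InsertServlet and UpdateServlet reference com.bigdata.rdf.sail.webapp.client.MiniMime, so the class is assumed to have moved to the client package with the same API (the relocated copy is not visible in the truncated portion of this message). Under that assumption, a typical use against one entry of an Accept header looks like the sketch below.

import com.bigdata.rdf.sail.webapp.client.MiniMime;

public class MiniMimeExample {

    public static void main(final String[] args) {

        // One Accept header entry, e.g. "application/rdf+xml;q=0.5".
        // Note: the parser splits on ';' and '=' without trimming whitespace,
        // so the q parameter must directly follow the ';'.
        final MiniMime m = new MiniMime("application/rdf+xml;q=0.5");

        System.out.println(m.getMimeType()); // application/rdf+xml
        System.out.println(m.q);             // 0.5 (defaults to 1.0 when no q is given)
    }
}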