From: <tho...@us...> - 2014-09-25 21:52:59

Revision: 8660
          http://sourceforge.net/p/bigdata/code/8660
Author:   thompsonbry
Date:     2014-09-25 21:52:55 +0000 (Thu, 25 Sep 2014)

Log Message:
-----------
Fix to scale-out DESCRIBE

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java	2014-09-25 19:00:48 UTC (rev 8659)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java	2014-09-25 21:52:55 UTC (rev 8660)
@@ -106,6 +106,7 @@
 import com.bigdata.rdf.store.AbstractTripleStore;
 import com.bigdata.rdf.task.AbstractApiTask;
 import com.bigdata.relation.RelationSchema;
+import com.bigdata.service.IBigdataFederation;
 import com.bigdata.sparse.ITPS;
 import com.bigdata.sparse.SparseRowStore;
 import com.bigdata.util.concurrent.DaemonThreadFactory;
@@ -2350,15 +2351,17 @@
 
     }
 
-    /*package*/ List<String> getNamespacesTx(final long tx) {
+    /*package*/ List<String> getNamespacesTx(long tx) {
 
-//        if (timestamp == ITx.READ_COMMITTED) {
-//
-//            // Use the last commit point.
-//            timestamp = getIndexManager().getLastCommitTime();
-//
-//        }
+        final IIndexManager indexManager = getIndexManager();
+
+        if (tx == ITx.READ_COMMITTED
+                && indexManager instanceof IBigdataFederation) {
+
+            // Use the last commit point for the federation *only*.
+            tx = getIndexManager().getLastCommitTime();
+
+        }
+
         // the triple store namespaces.
         final List<String> namespaces = new LinkedList<String>();
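
The essence of the patch above, reduced to a standalone sketch: a READ_COMMITTED
timestamp is rewritten to the last commit time only when the index manager is a
scale-out federation. The names below (IndexManager, Federation, READ_COMMITTED)
are stand-ins for the com.bigdata types (IIndexManager, IBigdataFederation,
ITx.READ_COMMITTED), not the actual bigdata source:

    // Standalone sketch of the timestamp-resolution rule introduced above.
    public class TimestampResolutionSketch {

        static final long READ_COMMITTED = -1L; // stand-in for ITx.READ_COMMITTED

        interface IndexManager { long getLastCommitTime(); }

        interface Federation extends IndexManager { } // stand-in for IBigdataFederation

        static long resolveTx(long tx, final IndexManager indexManager) {
            if (tx == READ_COMMITTED && indexManager instanceof Federation) {
                // Scale-out only: pin the read to a concrete commit point.
                tx = indexManager.getLastCommitTime();
            }
            return tx; // the Journal handles READ_COMMITTED natively
        }
    }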
From: <tho...@us...> - 2014-09-25 19:00:52

Revision: 8659
          http://sourceforge.net/p/bigdata/code/8659
Author:   thompsonbry
Date:     2014-09-25 19:00:48 +0000 (Thu, 25 Sep 2014)

Log Message:
-----------
Unwinding some dependencies that were dragging in the HALoadBalancerServlet
and hence the jetty ProxyServlet when the WAR is deployed to a tomcat
container.

There is still an issue with the workbench where it is specifying the servlet
URL that includes the LBS:

   /bigdata/LBS/

When the servlet container is not jetty this does not resolve to anything and
everything falls over.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java	2014-09-25 16:40:16 UTC (rev 8658)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java	2014-09-25 19:00:48 UTC (rev 8659)
@@ -102,9 +102,7 @@
 import com.bigdata.quorum.zk.ZKQuorumImpl;
 import com.bigdata.rdf.sail.CreateKBTask;
 import com.bigdata.rdf.sail.webapp.ConfigParams;
-import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet;
 import com.bigdata.rdf.sail.webapp.NanoSparqlServer;
-import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
 import com.bigdata.rwstore.RWStore;
 import com.bigdata.service.AbstractHATransactionService;
 import com.bigdata.service.jini.FakeLifeCycle;
@@ -570,6 +568,22 @@
     private volatile Server jettyServer;
 
     /**
+     * Exposed to the test suite.
+     */
+    WebAppContext getWebAppContext() {
+
+        final Server server = jettyServer;
+
+        if (server == null)
+            throw new IllegalStateException();
+
+        final WebAppContext wac = NanoSparqlServer.getWebApp(server);
+
+        return wac;
+
+    }
+
+    /**
      * Enum of the run states. The states are labeled by the goal of the run
      * state.
      */
@@ -4616,47 +4630,6 @@
     }
 
     /**
-     * Change the {@link IHALoadBalancerPolicy}.
-     * <p>
-     * TODO There are some intrinsic problems with this method that should be
-     * resolved before exposing it as an administrative API on the
-     * {@link HAGlue} interface.
-     * <p>
-     * (1) This only applies to running instances of the
-     * {@link HALoadBalancerServlet}. If an instance is started after this
-     * method is called, it will run with the as-configured
-     * {@link IHALoadBalancerPolicy} instance of the one specified in the last
-     * invocation of this method.
-     * <p>
-     * (2) There are various race conditions that exist with respect to: (a) the
-     * atomic change over of the {@link IHALoadBalancerPolicy} during an
-     * in-flight request; and (b) the atomic destroy of the old policy once
-     * there are no more in-flight requests using that old policy.
-     *
-     * TODO Either the {@link IHALoadBalancerPolicy} needs to be serializable or
-     * we need to pass along the class name and the configuration parameters.
-     * For this case, the configuration should be set from the caller specified
-     * values rather than those potentially associated with <code>web.xml</code>,
-     * especially since <code>web.xml</code> might not even have the necessary
-     * configuration parameters defined for the caller specified policy.
-     */
-    public void setHALoadBalancerPolicy(final IHALoadBalancerPolicy policy) {
-
-        final Server server = this.jettyServer;
-
-        if (server == null)
-            throw new IllegalStateException();
-
-        final WebAppContext wac = NanoSparqlServer.getWebApp(server);
-
-        if (log.isInfoEnabled())
-            log.info("Will set LBS: wac=" + wac + ", policy: " + policy);
-
-        HALoadBalancerServlet.setLBSPolicy(wac.getServletContext(), policy);
-
-    }
-
-    /**
      * Conditionally create the default KB instance as identified in
      * <code>web.xml</code>.
      *

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java	2014-09-25 16:40:16 UTC (rev 8658)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java	2014-09-25 19:00:48 UTC (rev 8659)
@@ -49,6 +49,7 @@
 import net.jini.config.ConfigurationException;
 
 import org.apache.log4j.Logger;
+import org.eclipse.jetty.webapp.WebAppContext;
 
 import com.bigdata.concurrent.FutureTaskMon;
 import com.bigdata.counters.PIDUtil;
@@ -1444,6 +1445,36 @@
          */
         private AtomicReference<Throwable> lastRootCause = new AtomicReference<Throwable>();
 
+        /**
+         * Change the {@link IHALoadBalancerPolicy}.
+         * <p>
+         * TODO There are some intrinsic problems with this method that should
+         * be resolved before exposing it as an administrative API on the
+         * {@link HAGlue} interface.
+         * <p>
+         * (1) This only applies to running instances of the
+         * {@link HALoadBalancerServlet}. If an instance is started after this
+         * method is called, it will run with the as-configured
+         * {@link IHALoadBalancerPolicy} instance of the one specified in the
+         * last invocation of this method.
+         * <p>
+         * (2) There are various race conditions that exist with respect to: (a)
+         * the atomic change over of the {@link IHALoadBalancerPolicy} during an
+         * in-flight request; and (b) the atomic destroy of the old policy once
+         * there are no more in-flight requests using that old policy.
+         * <p>
+         * (3) Exposing this method is just begging for trouble with the WAR
+         * artifact when deployed under a non-jetty container since it will drag
+         * in the jetty ProxyServlet.
+         *
+         * TODO Either the {@link IHALoadBalancerPolicy} needs to be
+         * serializable or we need to pass along the class name and the
+         * configuration parameters. For this case, the configuration should be
+         * set from the caller specified values rather than those potentially
+         * associated with <code>web.xml</code>, especially since
+         * <code>web.xml</code> might not even have the necessary configuration
+         * parameters defined for the caller specified policy.
+         */
         @Override
         public void setHALoadBalancerPolicy(final IHALoadBalancerPolicy policy)
                 throws IOException {
@@ -1454,8 +1485,15 @@
             if (log.isInfoEnabled())
                 log.info("Will set LBS policy: " + policy);
 
-            getHAJournalServer().setHALoadBalancerPolicy(policy);
+            final HAJournalServer haJournalServer = getHAJournalServer();
+
+            final WebAppContext wac = haJournalServer.getWebAppContext();
 
+            if (log.isInfoEnabled())
+                log.info("Will set LBS: wac=" + wac + ", policy: " + policy);
+
+            HALoadBalancerServlet.setLBSPolicy(wac.getServletContext(), policy);
+
         }

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java	2014-09-25 16:40:16 UTC (rev 8658)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java	2014-09-25 19:00:48 UTC (rev 8659)
@@ -45,7 +45,6 @@
 import com.bigdata.journal.IIndexManager;
 import com.bigdata.quorum.AbstractQuorum;
 import com.bigdata.rdf.sail.webapp.client.IMimeTypes;
-import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
 import com.bigdata.rdf.task.AbstractApiTask;
 
 /**
@@ -77,20 +76,24 @@
 
     /**
      * The {@link ServletContext} attribute whose value is the prefix for the
-     * {@link HALoadBalancerServlet} iff it is running.
+     * HALoadBalancerServlet (DO NOT LINK JAVADOC) iff it is running.
      * <p>
-     * Note: Do NOT reference the <code>HALoadBalancerServlet</code> here. It
-     * will drag in the jetty dependencies and that breaks the tomcat WAR
-     * deployment.
+     * Note: Do NOT reference the <code>HALoadBalancerServlet</code> or anything
+     * in the <code>com.bigdata.rdf.sail.webapp.lbs</code> package here. It will
+     * drag in the jetty dependencies and that breaks the tomcat WAR deployment.
      */
     static final String ATTRIBUTE_LBS_PREFIX = "com.bigdata.rdf.sail.webapp.HALoadBalancerServlet.prefix";
 
     /**
      * The {@link ServletContext} attribute that is managed by the
-     * {@link HALoadBalancerServlet} and which maintains a collection of the
-     * active instances of that servlet. This is used to administer the
-     * {@link IHALoadBalancerPolicy} associated with the load balancer servlet
-     * instances.
+     * HALoadBalancerServlet (DO NOT LINK JAVADOC) and which maintains a
+     * collection of the active instances of that servlet. This is used to
+     * administer the IHALoadBalancerPolicy associated with the load balancer
+     * servlet instances.
+     * <p>
+     * Note: Do NOT reference the <code>HALoadBalancerServlet</code> or anything
+     * in the <code>com.bigdata.rdf.sail.webapp.lbs</code> package here. It will
+     * drag in the jetty dependencies and that breaks the tomcat WAR deployment.
      */
     static final String ATTRIBUTE_LBS_INSTANCES = "com.bigdata.rdf.sail.webapp.HALoadBalancerServlet.instances";

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java	2014-09-25 16:40:16 UTC (rev 8658)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java	2014-09-25 19:00:48 UTC (rev 8659)
@@ -26,6 +26,8 @@
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
 import java.math.BigInteger;
 import java.net.InetSocketAddress;
 import java.security.DigestException;
@@ -36,6 +38,7 @@
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
+import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -330,12 +333,35 @@
 
             // HA Load Balancer.
             {
-
-                p.text("Service: LBSPolicy=")
-                        .node("span").attr("id", "lbs-policy")
-                        .text(HALoadBalancerServlet.toString(req
-                                .getServletContext())).close()
-                        .node("br").close();
+                /*
+                 * Note: MUST NOT HAVE A DIRECT REFERENCE TO THIS CLASS OR
+                 * IT WILL BREAK THE WAR ARTIFACT WHEN DEPLOYED TO A
+                 * NON-JETTY CONTAINER SINCE THE JETTY ProxyServlet WILL NOT
+                 * BE FOUND.
+                 */
+                try {
+                    final Class<?> cls = Class
+                            .forName("com.bigdata.rdf.sail.webapp.HALoadBalancerServlet");
+                    final Method m = cls.getMethod("toString",
+                            new Class[] { ServletContext.class });
+                    final String rep = (String) m.invoke(null/* static */,
+                            new Object[] { req.getServletContext() });
+                    p.text("Service: LBSPolicy=").node("span")
+                            .attr("id", "lbs-policy").text(rep).close()
+                            .node("br").close();
+                } catch (ClassNotFoundException e) {
+                    throw new RuntimeException(e);
+                } catch (NoSuchMethodException e) {
+                    throw new RuntimeException(e);
+                } catch (SecurityException e) {
+                    throw new RuntimeException(e);
+                } catch (IllegalAccessException e) {
+                    throw new RuntimeException(e);
+                } catch (IllegalArgumentException e) {
+                    throw new RuntimeException(e);
+                } catch (InvocationTargetException e) {
+                    throw new RuntimeException(e);
+                }
             }
//            if(true) {
//            /*
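
The reflection idiom adopted in HAStatusServletUtil above generalizes to any
optional dependency: the calling class compiles and class-loads without a
static reference, and the optional class is resolved only when the code path
actually runs. A minimal sketch, with a hypothetical class name standing in
for HALoadBalancerServlet and a plain Object argument standing in for the
ServletContext:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class OptionalDependencySketch {

        /**
         * Invoke a static String-returning method on a class that may be
         * absent from the classpath. Class and method names are placeholders.
         */
        static String invokeOptional(final String className, final Object arg) {
            try {
                // Resolved at runtime, so no compile-time dependency exists.
                final Class<?> cls = Class.forName(className);
                final Method m = cls.getMethod("toString", Object.class);
                return (String) m.invoke(null /* static */, arg);
            } catch (ClassNotFoundException e) {
                // Optional dependency absent: degrade rather than fail.
                return "n/a";
            } catch (NoSuchMethodException e) {
                throw new RuntimeException(e);
            } catch (IllegalAccessException e) {
                throw new RuntimeException(e);
            } catch (InvocationTargetException e) {
                throw new RuntimeException(e);
            }
        }
    }

The patch itself rethrows ClassNotFoundException as a RuntimeException;
returning a sentinel instead, as sketched here, is an alternative when the
servlet is legitimately absent under a non-jetty container.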
From: <tho...@us...> - 2014-09-25 16:40:19

Revision: 8658
          http://sourceforge.net/p/bigdata/code/8658
Author:   thompsonbry
Date:     2014-09-25 16:40:16 +0000 (Thu, 25 Sep 2014)

Log Message:
-----------
Adding 1.3.2 release notes

Added Paths:
-----------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_2.txt

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_2.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_2.txt	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_2.txt	2014-09-25 16:40:16 UTC (rev 8658)
@@ -0,0 +1,539 @@
+This is a minor release of bigdata(R).
+
+Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF capable of loading 1B triples in under one hour on a 15 node cluster. Bigdata operates in both a single machine mode (Journal), highly available replication cluster mode (HAJournalServer), and a horizontally sharded cluster mode (BigdataFederation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The HAJournalServer adds replication, online backup, horizontal scaling of query, and high availability. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation.
+
+Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the HAJournalServer for high availability and linear scaling in query throughput. Choose the BigdataFederation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput.
+
+See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and [3,5,6] for news, questions, and the latest developments. For more information about SYSTAP, LLC and bigdata, see [7].
+
+Starting with the 1.0.0 release, we offer a WAR artifact [8] for easy installation of the single machine RDF database. For custom development and cluster installations we recommend checking out the code from SVN using the tag for this release. The code will build automatically under eclipse. You can also build the code using the ant script. The cluster installer requires the use of the ant script.
+
+Starting with the 1.3.0 release, we offer a tarball artifact [10] for easy installation of the HA replication cluster.
+
+You can download the WAR (standalone) or HA artifacts from:
+
+http://sourceforge.net/projects/bigdata/
+
+You can checkout this release from:
+
+https://svn.code.sf.net/p/bigdata/code/tags/BIGDATA_RELEASE_1_3_1
+
+Critical or otherwise of note in this minor release:
+
+- Stored query facility (#989).
+- Improved locality for small allocation slots (#986).
+- Improved scalability for RWStore (#936).
+- Various improvements for the workbench.
+- Various improvements for property graphs.
+- Critical bug fix for hasStatements() for unisolated indices (#855, #1005).
+- Critical bug fix for ConcurrentWeakValueHashMap (#1004).
+- Critical bug fix for query timeouts (#772, #865).
+- Critical bug fix for RWStore (#973).
+- Security fix for Apache commons-fileupload (#1010).
+
+New features in 1.3.x:
+
+- Java 7 is now required.
+- High availability [10].
+- High availability load balancer.
+- New RDF/SPARQL workbench.
+- Blueprints API.
+- RDF Graph Mining Service (GASService) [12].
+- Reification Done Right (RDR) support [11].
+- Property Path performance enhancements.
+- Plus numerous other bug fixes and performance enhancements.
+
+Feature summary:
+
+- Highly Available Replication Clusters (HAJournalServer [10])
+- Single machine data storage to ~50B triples/quads (RWStore);
+- Clustered data storage is essentially unlimited (BigdataFederation);
+- Simple embedded and/or webapp deployment (NanoSparqlServer);
+- Triples, quads, or triples with provenance (SIDs);
+- Fast RDFS+ inference and truth maintenance;
+- Fast 100% native SPARQL 1.1 evaluation;
+- Integrated "analytic" query package;
+- %100 Java memory manager leverages the JVM native heap (no GC);
+
+Road map [3]:
+
+- Column-wise indexing;
+- Runtime Query Optimizer for quads;
+- Performance optimization for scale-out clusters; and
+- Simplified deployment, configuration, and administration for scale-out clusters.
+
+Change log:
+
+  Note: Versions with (*) MAY require data migration. For details, see [9].
+
+1.3.2:
+
+- http://trac.bigdata.com/ticket/999 (Extend BigdataSailFactory to take arbitrary properties)
+- http://trac.bigdata.com/ticket/998 (SPARQL Update through BigdataGraph)
+- http://trac.bigdata.com/ticket/996 (Add custom prefix support for query results)
+- http://trac.bigdata.com/ticket/995 (Allow general purpose SPARQL queries through BigdataGraph)
+- http://trac.bigdata.com/ticket/992 (Deadlock between AbstractRunningQuery.cancel(), QueryLog.log(), and ArbitraryLengthPathTask)
+- http://trac.bigdata.com/ticket/990 (Query hints not recognized in FILTERs)
+- http://trac.bigdata.com/ticket/989 (Stored query service)
+- http://trac.bigdata.com/ticket/988 (Bad performance for FILTER EXISTS)
+- http://trac.bigdata.com/ticket/987 (maven build is broken)
+- http://trac.bigdata.com/ticket/986 (Improve locality for small allocation slots)
+- http://trac.bigdata.com/ticket/985 (Deadlock in BigdataTriplePatternMaterializer)
+- http://trac.bigdata.com/ticket/975 (HA Health Status Page)
+- http://trac.bigdata.com/ticket/974 (Name2Addr.indexNameScan(prefix) uses scan + filter)
+- http://trac.bigdata.com/ticket/973 (RWStore.commit() should be more defensive)
+- http://trac.bigdata.com/ticket/971 (Clarify HTTP Status codes for CREATE NAMESPACE operation)
+- http://trac.bigdata.com/ticket/968 (no link to wiki from workbench)
+- http://trac.bigdata.com/ticket/966 (Failed to get namespace under concurrent update)
+- http://trac.bigdata.com/ticket/965 (Can not run LBS mode with HA1 setup)
+- http://trac.bigdata.com/ticket/961 (Clone/modify namespace to create a new one)
+- http://trac.bigdata.com/ticket/960 (Export namespace properties in XML/Java properties text format)
+- http://trac.bigdata.com/ticket/938 (HA Load Balancer)
+- http://trac.bigdata.com/ticket/936 (Support larger metabits allocations)
+- http://trac.bigdata.com/ticket/932 (Bigdata/Rexster integration)
+- http://trac.bigdata.com/ticket/919 (Formatted Layout for Status pages)
+- http://trac.bigdata.com/ticket/899 (REST API Query Cancellation)
+- http://trac.bigdata.com/ticket/885 (Panels do not appear on startup in Firefox)
+- http://trac.bigdata.com/ticket/884 (Executing a new query should clear the old query results from the console)
+- http://trac.bigdata.com/ticket/882 (Abbreviate URIs that can be namespaced with one of the defined common namespaces)
+- http://trac.bigdata.com/ticket/880 (Can't explore an absolute URI with < >)
+- http://trac.bigdata.com/ticket/878 (Explore page looks weird when empty)
+- http://trac.bigdata.com/ticket/873 (Allow user to go use browser back & forward buttons to view explore history)
+- http://trac.bigdata.com/ticket/865 (OutOfMemoryError instead of Timeout for SPARQL Property Paths)
+- http://trac.bigdata.com/ticket/858 (Change explore URLs to include URI being clicked so user can see what they've clicked on before)
+- http://trac.bigdata.com/ticket/855 (AssertionError: Child does not have persistent identity)
+- http://trac.bigdata.com/ticket/850 (Search functionality in workbench)
+- http://trac.bigdata.com/ticket/847 (Query results panel should recognize well known namespaces for easier reading)
+- http://trac.bigdata.com/ticket/845 (Display the properties for a namespace)
+- http://trac.bigdata.com/ticket/843 (Create new tabs for status & performance counters, and add per namespace service/VoID description links)
+- http://trac.bigdata.com/ticket/837 (Configurator for new namespaces)
+- http://trac.bigdata.com/ticket/836 (Allow user to create namespace in the workbench)
+- http://trac.bigdata.com/ticket/830 (Output RDF data from queries in table format)
+- http://trac.bigdata.com/ticket/829 (Export query results)
+- http://trac.bigdata.com/ticket/828 (Save selected namespace in browser)
+- http://trac.bigdata.com/ticket/827 (Explore tab in workbench)
+- http://trac.bigdata.com/ticket/826 (Create shortcut to execute load/query)
+- http://trac.bigdata.com/ticket/823 (Disable textarea when a large file is selected)
+- http://trac.bigdata.com/ticket/820 (Allow non-file:// URLs to be loaded)
+- http://trac.bigdata.com/ticket/819 (Retrieve default namespace on page load)
+- http://trac.bigdata.com/ticket/772 (Query timeout only checked at operator start/stop)
+- http://trac.bigdata.com/ticket/765 (order by expr skips invalid expressions)
+- http://trac.bigdata.com/ticket/587 (JSP page to configure KBs)
+- http://trac.bigdata.com/ticket/343 (Stochastic assert in AbstractBTree#writeNodeOrLeaf() in CI)
+- http://trac.bigdata.com/ticket/1010 (Upgrade apache http components to 1.3.1 (security))
+- http://trac.bigdata.com/ticket/1005 (Invalidate BTree objects if error occurs during eviction)
+- http://trac.bigdata.com/ticket/1004 (Concurrent binding problem)
+- http://trac.bigdata.com/ticket/1002 (Concurrency issues in JVMHashJoinUtility caused by MAX_PARALLEL query hint override)
+- http://trac.bigdata.com/ticket/1000 (Add configuration option to turn off bottom-up evaluation)
+
+1.3.1:
+
+- http://trac.bigdata.com/ticket/242 (Deadlines do not play well with GROUP_BY, ORDER_BY, etc.)
+- http://trac.bigdata.com/ticket/256 (Amortize RTO cost)
+- http://trac.bigdata.com/ticket/257 (Support BOP fragments in the RTO.)
+- http://trac.bigdata.com/ticket/258 (Integrate RTO into SAIL)
+- http://trac.bigdata.com/ticket/259 (Dynamically increase RTO sampling limit.)
+- http://trac.bigdata.com/ticket/526 (Reification done right)
+- http://trac.bigdata.com/ticket/580 (Problem with the bigdata RDF/XML parser with sids)
+- http://trac.bigdata.com/ticket/622 (NSS using jetty+windows can lose connections (windows only; jdk 6/7 bug))
+- http://trac.bigdata.com/ticket/624 (HA Load Balancer)
+- http://trac.bigdata.com/ticket/629 (Graph processing API)
+- http://trac.bigdata.com/ticket/721 (Support HA1 configurations)
+- http://trac.bigdata.com/ticket/730 (Allow configuration of embedded NSS jetty server using jetty-web.xml)
+- http://trac.bigdata.com/ticket/759 (multiple filters interfere)
+- http://trac.bigdata.com/ticket/763 (Stochastic results with Analytic Query Mode)
+- http://trac.bigdata.com/ticket/774 (Converge on Java 7.)
+- http://trac.bigdata.com/ticket/779 (Resynchronization of socket level write replication protocol (HA))
+- http://trac.bigdata.com/ticket/780 (Incremental or asynchronous purge of HALog files)
+- http://trac.bigdata.com/ticket/782 (Wrong serialization version)
+- http://trac.bigdata.com/ticket/784 (Describe Limit/offset don't work as expected)
+- http://trac.bigdata.com/ticket/787 (Update documentations and samples, they are OUTDATED)
+- http://trac.bigdata.com/ticket/788 (Name2Addr does not report all root causes if the commit fails.)
+- http://trac.bigdata.com/ticket/789 (ant task to build sesame fails, docs for setting up bigdata for sesame are ancient)
+- http://trac.bigdata.com/ticket/790 (should not be pruning any children)
+- http://trac.bigdata.com/ticket/791 (Clean up query hints)
+- http://trac.bigdata.com/ticket/793 (Explain reports incorrect value for opCount)
+- http://trac.bigdata.com/ticket/796 (Filter assigned to sub-query by query generator is dropped from evaluation)
+- http://trac.bigdata.com/ticket/797 (add sbt setup to getting started wiki)
+- http://trac.bigdata.com/ticket/798 (Solution order not always preserved)
+- http://trac.bigdata.com/ticket/799 (mis-optimation of quad pattern vs triple pattern)
+- http://trac.bigdata.com/ticket/802 (Optimize DatatypeFactory instantiation in DateTimeExtension)
+- http://trac.bigdata.com/ticket/803 (prefixMatch does not work in full text search)
+- http://trac.bigdata.com/ticket/804 (update bug deleting quads)
+- http://trac.bigdata.com/ticket/806 (Incorrect AST generated for OPTIONAL { SELECT })
+- http://trac.bigdata.com/ticket/808 (Wildcard search in bigdata for type suggessions)
+- http://trac.bigdata.com/ticket/810 (Expose GAS API as SPARQL SERVICE)
+- http://trac.bigdata.com/ticket/815 (RDR query does too much work)
+- http://trac.bigdata.com/ticket/816 (Wildcard projection ignores variables inside a SERVICE call.)
+- http://trac.bigdata.com/ticket/817 (Unexplained increase in journal size)
+- http://trac.bigdata.com/ticket/821 (Reject large files, rather then storing them in a hidden variable)
+- http://trac.bigdata.com/ticket/831 (UNION with filter issue)
+- http://trac.bigdata.com/ticket/841 (Using "VALUES" in a query returns lexical error)
+- http://trac.bigdata.com/ticket/848 (Fix SPARQL Results JSON writer to write the RDR syntax)
+- http://trac.bigdata.com/ticket/849 (Create writers that support the RDR syntax)
+- http://trac.bigdata.com/ticket/851 (RDR GAS interface)
+- http://trac.bigdata.com/ticket/852 (RemoteRepository.cancel() does not consume the HTTP response entity.)
+- http://trac.bigdata.com/ticket/853 (Follower does not accept POST of idempotent operations (HA))
+- http://trac.bigdata.com/ticket/854 (Allow override of maximum length before converting an HTTP GET to an HTTP POST)
+- http://trac.bigdata.com/ticket/855 (AssertionError: Child does not have persistent identity)
+- http://trac.bigdata.com/ticket/862 (Create parser for JSON SPARQL Results)
+- http://trac.bigdata.com/ticket/863 (HA1 commit failure)
+- http://trac.bigdata.com/ticket/866 (Batch remove API for the SAIL)
+- http://trac.bigdata.com/ticket/867 (NSS concurrency problem with list namespaces and create namespace)
+- http://trac.bigdata.com/ticket/869 (HA5 test suite)
+- http://trac.bigdata.com/ticket/872 (Full text index range count optimization)
+- http://trac.bigdata.com/ticket/874 (FILTER not applied when there is UNION in the same join group)
+- http://trac.bigdata.com/ticket/876 (When I upload a file I want to see the filename.)
+- http://trac.bigdata.com/ticket/877 (RDF Format selector is invisible)
+- http://trac.bigdata.com/ticket/883 (CANCEL Query fails on non-default kb namespace on HA follower.)
+- http://trac.bigdata.com/ticket/886 (Provide workaround for bad reverse DNS setups.)
+- http://trac.bigdata.com/ticket/887 (BIND is leaving a variable unbound)
+- http://trac.bigdata.com/ticket/892 (HAJournalServer does not die if zookeeper is not running)
+- http://trac.bigdata.com/ticket/893 (large sparql insert optimization slow?)
+- http://trac.bigdata.com/ticket/894 (unnecessary synchronization)
+- http://trac.bigdata.com/ticket/895 (stack overflow in populateStatsMap)
+- http://trac.bigdata.com/ticket/902 (Update Basic Bigdata Chef Cookbook)
+- http://trac.bigdata.com/ticket/904 (AssertionError: PropertyPathNode got to ASTJoinOrderByType.optimizeJoinGroup)
+- http://trac.bigdata.com/ticket/905 (unsound combo query optimization: union + filter)
+- http://trac.bigdata.com/ticket/906 (DC Prefix Button Appends "</li>")
+- http://trac.bigdata.com/ticket/907 (Add a quick-start ant task for the BD Server "ant start")
+- http://trac.bigdata.com/ticket/912 (Provide a configurable IAnalyzerFactory)
+- http://trac.bigdata.com/ticket/913 (Blueprints API Implementation)
+- http://trac.bigdata.com/ticket/914 (Settable timeout on SPARQL Query (REST API))
+- http://trac.bigdata.com/ticket/915 (DefaultAnalyzerFactory issues)
+- http://trac.bigdata.com/ticket/920 (Content negotiation orders accept header scores in reverse)
+- http://trac.bigdata.com/ticket/939 (NSS does not start from command line: bigdata-war/src not found.)
+- http://trac.bigdata.com/ticket/940 (ProxyServlet in web.xml breaks tomcat WAR (HA LBS)
+
+1.3.0:
+
+- http://trac.bigdata.com/ticket/530 (Journal HA)
+- http://trac.bigdata.com/ticket/621 (Coalesce write cache records and install reads in cache)
+- http://trac.bigdata.com/ticket/623 (HA TXS)
+- http://trac.bigdata.com/ticket/639 (Remove triple-buffering in RWStore)
+- http://trac.bigdata.com/ticket/645 (HA backup)
+- http://trac.bigdata.com/ticket/646 (River not compatible with newer 1.6.0 and 1.7.0 JVMs)
+- http://trac.bigdata.com/ticket/648 (Add a custom function to use full text index for filtering.)
+- http://trac.bigdata.com/ticket/651 (RWS test failure)
+- http://trac.bigdata.com/ticket/652 (Compress write cache blocks for replication and in HALogs)
+- http://trac.bigdata.com/ticket/662 (Latency on followers during commit on leader)
+- http://trac.bigdata.com/ticket/663 (Issue with OPTIONAL blocks)
+- http://trac.bigdata.com/ticket/664 (RWStore needs post-commit protocol)
+- http://trac.bigdata.com/ticket/665 (HA3 LOAD non-responsive with node failure)
+- http://trac.bigdata.com/ticket/666 (Occasional CI deadlock in HALogWriter testConcurrentRWWriterReader)
+- http://trac.bigdata.com/ticket/670 (Accumulating HALog files cause latency for HA commit)
+- http://trac.bigdata.com/ticket/671 (Query on follower fails during UPDATE on leader)
+- http://trac.bigdata.com/ticket/673 (DGC in release time consensus protocol causes native thread leak in HAJournalServer at each commit)
+- http://trac.bigdata.com/ticket/674 (WCS write cache compaction causes errors in RWS postHACommit())
+- http://trac.bigdata.com/ticket/676 (Bad patterns for timeout computations)
+- http://trac.bigdata.com/ticket/677 (HA deadlock under UPDATE + QUERY)
+- http://trac.bigdata.com/ticket/678 (DGC Thread and Open File Leaks: sendHALogForWriteSet())
+- http://trac.bigdata.com/ticket/679 (HAJournalServer can not restart due to logically empty log file)
+- http://trac.bigdata.com/ticket/681 (HAJournalServer deadlock: pipelineRemove() and getLeaderId())
+- http://trac.bigdata.com/ticket/684 (Optimization with skos altLabel)
+- http://trac.bigdata.com/ticket/686 (Consensus protocol does not detect clock skew correctly)
+- http://trac.bigdata.com/ticket/687 (HAJournalServer Cache not populated)
+- http://trac.bigdata.com/ticket/689 (Missing URL encoding in RemoteRepositoryManager)
+- http://trac.bigdata.com/ticket/690 (Error when using the alias "a" instead of rdf:type for a multipart insert)
+- http://trac.bigdata.com/ticket/691 (Failed to re-interrupt thread in HAJournalServer)
+- http://trac.bigdata.com/ticket/692 (Failed to re-interrupt thread)
+- http://trac.bigdata.com/ticket/693 (OneOrMorePath SPARQL property path expression ignored)
+- http://trac.bigdata.com/ticket/694 (Transparently cancel update/query in RemoteRepository)
+- http://trac.bigdata.com/ticket/695 (HAJournalServer reports "follower" but is in SeekConsensus and is not participating in commits.)
+- http://trac.bigdata.com/ticket/701 (Problems in BackgroundTupleResult)
+- http://trac.bigdata.com/ticket/702 (InvocationTargetException on /namespace call)
+- http://trac.bigdata.com/ticket/704 (ask does not return json)
+- http://trac.bigdata.com/ticket/705 (Race between QueryEngine.putIfAbsent() and shutdownNow())
+- http://trac.bigdata.com/ticket/706 (MultiSourceSequentialCloseableIterator.nextSource() can throw NPE)
+- http://trac.bigdata.com/ticket/707 (BlockingBuffer.close() does not unblock threads)
+- http://trac.bigdata.com/ticket/708 (BIND heisenbug - race condition on select query with BIND)
+- http://trac.bigdata.com/ticket/711 (sparql protocol: mime type application/sparql-query)
+- http://trac.bigdata.com/ticket/712 (SELECT ?x { OPTIONAL { ?x eg:doesNotExist eg:doesNotExist } } incorrect)
+- http://trac.bigdata.com/ticket/715 (Interrupt of thread submitting a query for evaluation does not always terminate the AbstractRunningQuery)
+- http://trac.bigdata.com/ticket/716 (Verify that IRunningQuery instances (and nested queries) are correctly cancelled when interrupted)
+- http://trac.bigdata.com/ticket/718 (HAJournalServer needs to handle ZK client connection loss)
+- http://trac.bigdata.com/ticket/720 (HA3 simultaneous service start failure)
+- http://trac.bigdata.com/ticket/723 (HA asynchronous tasks must be canceled when invariants are changed)
+- http://trac.bigdata.com/ticket/725 (FILTER EXISTS in subselect)
+- http://trac.bigdata.com/ticket/726 (Logically empty HALog for committed transaction)
+- http://trac.bigdata.com/ticket/727 (DELETE/INSERT fails with OPTIONAL non-matching WHERE)
+- http://trac.bigdata.com/ticket/728 (Refactor to create HAClient)
+- http://trac.bigdata.com/ticket/729 (ant bundleJar not working)
+- http://trac.bigdata.com/ticket/731 (CBD and Update leads to 500 status code)
+- http://trac.bigdata.com/ticket/732 (describe statement limit does not work)
+- http://trac.bigdata.com/ticket/733 (Range optimizer not optimizing Slice service)
+- http://trac.bigdata.com/ticket/734 (two property paths interfere)
+- http://trac.bigdata.com/ticket/736 (MIN() malfunction)
+- http://trac.bigdata.com/ticket/737 (class cast exception)
+- http://trac.bigdata.com/ticket/739 (Inconsistent treatment of bind and optional property path)
+- http://trac.bigdata.com/ticket/741 (ctc-striterators should build as independent top-level project (Apache2))
+- http://trac.bigdata.com/ticket/743 (AbstractTripleStore.destroy() does not filter for correct prefix)
+- http://trac.bigdata.com/ticket/746 (Assertion error)
+- http://trac.bigdata.com/ticket/747 (BOUND bug)
+- http://trac.bigdata.com/ticket/748 (incorrect join with subselect renaming vars)
+- http://trac.bigdata.com/ticket/754 (Failure to setup SERVICE hook and changeLog for Unisolated and Read/Write connections)
+- http://trac.bigdata.com/ticket/755 (Concurrent QuorumActors can interfere leading to failure to progress)
+- http://trac.bigdata.com/ticket/756 (order by and group_concat)
+- http://trac.bigdata.com/ticket/760 (Code review on 2-phase commit protocol)
+- http://trac.bigdata.com/ticket/764 (RESYNC failure (HA))
+- http://trac.bigdata.com/ticket/770 (alpp ordering)
+- http://trac.bigdata.com/ticket/772 (Query timeout only checked at operator start/stop.)
+- http://trac.bigdata.com/ticket/776 (Closed as duplicate of #490)
+- http://trac.bigdata.com/ticket/778 (HA Leader fail results in transient problem with allocations on other services)
+- http://trac.bigdata.com/ticket/783 (Operator Alerts (HA))
+
+1.2.4:
+
+- http://trac.bigdata.com/ticket/777 (ConcurrentModificationException in ASTComplexOptionalOptimizer)
+
+1.2.3:
+
+- http://trac.bigdata.com/ticket/168 (Maven Build)
+- http://trac.bigdata.com/ticket/196 (Journal leaks memory).
+- http://trac.bigdata.com/ticket/235 (Occasional deadlock in CI runs in com.bigdata.io.writecache.TestAll)
+- http://trac.bigdata.com/ticket/312 (CI (mock) quorums deadlock)
+- http://trac.bigdata.com/ticket/405 (Optimize hash join for subgroups with no incoming bound vars.)
+- http://trac.bigdata.com/ticket/412 (StaticAnalysis#getDefinitelyBound() ignores exogenous variables.)
+- http://trac.bigdata.com/ticket/485 (RDFS Plus Profile)
+- http://trac.bigdata.com/ticket/495 (SPARQL 1.1 Property Paths)
+- http://trac.bigdata.com/ticket/519 (Negative parser tests)
+- http://trac.bigdata.com/ticket/531 (SPARQL UPDATE for SOLUTION SETS)
+- http://trac.bigdata.com/ticket/535 (Optimize JOIN VARS for Sub-Selects)
+- http://trac.bigdata.com/ticket/555 (Support PSOutputStream/InputStream at IRawStore)
+- http://trac.bigdata.com/ticket/559 (Use RDFFormat.NQUADS as the format identifier for the NQuads parser)
+- http://trac.bigdata.com/ticket/570 (MemoryManager Journal does not implement all methods).
+- http://trac.bigdata.com/ticket/575 (NSS Admin API)
+- http://trac.bigdata.com/ticket/577 (DESCRIBE with OFFSET/LIMIT needs to use sub-select)
+- http://trac.bigdata.com/ticket/578 (Concise Bounded Description (CBD))
+- http://trac.bigdata.com/ticket/579 (CONSTRUCT should use distinct SPO filter)
+- http://trac.bigdata.com/ticket/583 (VoID in ServiceDescription)
+- http://trac.bigdata.com/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.)
+- http://trac.bigdata.com/ticket/590 (nxparser fails with uppercase language tag)
+- http://trac.bigdata.com/ticket/592 (Optimize RWStore allocator sizes)
+- http://trac.bigdata.com/ticket/593 (Ugrade to Sesame 2.6.10)
+- http://trac.bigdata.com/ticket/594 (WAR was deployed using TRIPLES rather than QUADS by default)
+- http://trac.bigdata.com/ticket/596 (Change web.xml parameter names to be consistent with Jini/River)
+- http://trac.bigdata.com/ticket/597 (SPARQL UPDATE LISTENER)
+- http://trac.bigdata.com/ticket/598 (B+Tree branching factor and HTree addressBits are confused in their NodeSerializer implementations)
+- http://trac.bigdata.com/ticket/599 (BlobIV for blank node : NotMaterializedException)
+- http://trac.bigdata.com/ticket/600 (BlobIV collision counter hits false limit.)
+- http://trac.bigdata.com/ticket/601 (Log uncaught exceptions)
+- http://trac.bigdata.com/ticket/602 (RWStore does not discard logged deletes on reset())
+- http://trac.bigdata.com/ticket/607 (History service / index)
+- http://trac.bigdata.com/ticket/608 (LOG BlockingBuffer not progressing at INFO or lower level)
+- http://trac.bigdata.com/ticket/609 (bigdata-ganglia is required dependency for Journal)
+- http://trac.bigdata.com/ticket/611 (The code that processes SPARQL Update has a typo)
+- http://trac.bigdata.com/ticket/612 (Bigdata scale-up depends on zookeper)
+- http://trac.bigdata.com/ticket/613 (SPARQL UPDATE response inlines large DELETE or INSERT triple graphs)
+- http://trac.bigdata.com/ticket/614 (static join optimizer does not get ordering right when multiple tails share vars with ancestry)
+- http://trac.bigdata.com/ticket/615 (AST2BOpUtility wraps UNION with an unnecessary hash join)
+- http://trac.bigdata.com/ticket/616 (Row store read/update not isolated on Journal)
+- http://trac.bigdata.com/ticket/617 (Concurrent KB create fails with "No axioms defined?")
+- http://trac.bigdata.com/ticket/618 (DirectBufferPool.poolCapacity maximum of 2GB)
+- http://trac.bigdata.com/ticket/619 (RemoteRepository class should use application/x-www-form-urlencoded for large POST requests)
+- http://trac.bigdata.com/ticket/620 (UpdateServlet fails to parse MIMEType when doing conneg.)
+- http://trac.bigdata.com/ticket/626 (Expose performance counters for read-only indices)
+- http://trac.bigdata.com/ticket/627 (Environment variable override for NSS properties file)
+- http://trac.bigdata.com/ticket/628 (Create a bigdata-client jar for the NSS REST API)
+- http://trac.bigdata.com/ticket/631 (ClassCastException in SIDs mode query)
+- http://trac.bigdata.com/ticket/632 (NotMaterializedException when a SERVICE call needs variables that are provided as query input bindings)
+- http://trac.bigdata.com/ticket/633 (ClassCastException when binding non-uri values to a variable that occurs in predicate position)
+- http://trac.bigdata.com/ticket/638 (Change DEFAULT_MIN_RELEASE_AGE to 1ms)
+- http://trac.bigdata.com/ticket/640 (Conditionally rollback() BigdataSailConnection if dirty)
+- http://trac.bigdata.com/ticket/642 (Property paths do not work inside of exists/not exists filters)
+- http://trac.bigdata.com/ticket/643 (Add web.xml parameters to lock down public NSS end points)
+- http://trac.bigdata.com/ticket/644 (Bigdata2Sesame2BindingSetIterator can fail to notice asynchronous close())
+- http://trac.bigdata.com/ticket/650 (Can not POST RDF to a graph using REST API)
+- http://trac.bigdata.com/ticket/654 (Rare AssertionError in WriteCache.clearAddrMap())
+- http://trac.bigdata.com/ticket/655 (SPARQL REGEX operator does not perform case-folding correctly for Unicode data)
+- http://trac.bigdata.com/ticket/656 (InFactory bug when IN args consist of a single literal)
+- http://trac.bigdata.com/ticket/647 (SIDs mode creates unnecessary hash join for GRAPH group patterns)
+- http://trac.bigdata.com/ticket/667 (Provide NanoSparqlServer initialization hook)
+- http://trac.bigdata.com/ticket/669 (Doubly nested subqueries yield no results with LIMIT)
+- http://trac.bigdata.com/ticket/675 (Flush indices in parallel during checkpoint to reduce IO latency)
+- http://trac.bigdata.com/ticket/682 (AtomicRowFilter UnsupportedOperationException)
+
+1.2.2:
+
+- http://trac.bigdata.com/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.)
+- http://trac.bigdata.com/ticket/602 (RWStore does not discard logged deletes on reset())
+- http://trac.bigdata.com/ticket/603 (Prepare critical maintenance release as branch of 1.2.1)
+
+1.2.1:
+
+- http://trac.bigdata.com/ticket/533 (Review materialization for inline IVs)
+- http://trac.bigdata.com/ticket/539 (NotMaterializedException with REGEX and Vocab)
+- http://trac.bigdata.com/ticket/540 (SPARQL UPDATE using NSS via index.html)
+- http://trac.bigdata.com/ticket/541 (MemoryManaged backed Journal mode)
+- http://trac.bigdata.com/ticket/546 (Index cache for Journal)
+- http://trac.bigdata.com/ticket/549 (BTree can not be cast to Name2Addr (MemStore recycler))
+- http://trac.bigdata.com/ticket/550 (NPE in Leaf.getKey() : root cause was user error)
+- http://trac.bigdata.com/ticket/558 (SPARQL INSERT not working in same request after INSERT DATA)
+- http://trac.bigdata.com/ticket/562 (Sub-select in INSERT cause NPE in UpdateExprBuilder)
+- http://trac.bigdata.com/ticket/563 (DISTINCT ORDER BY)
+- http://trac.bigdata.com/ticket/567 (Failure to set cached value on IV results in incorrect behavior for complex UPDATE operation)
+- http://trac.bigdata.com/ticket/568 (DELETE WHERE fails with Java AssertionError)
+- http://trac.bigdata.com/ticket/569 (LOAD-CREATE-LOAD using virgin journal fails with "Graph exists" exception)
+- http://trac.bigdata.com/ticket/571 (DELETE/INSERT WHERE handling of blank nodes)
+- http://trac.bigdata.com/ticket/573 (NullPointerException when attempting to INSERT DATA containing a blank node)
+
+1.2.0: (*)
+
+- http://trac.bigdata.com/ticket/92 (Monitoring webapp)
+- http://trac.bigdata.com/ticket/267 (Support evaluation of 3rd party operators)
+- http://trac.bigdata.com/ticket/337 (Compact and efficient movement of binding sets between nodes.)
+- http://trac.bigdata.com/ticket/433 (Cluster leaks threads under read-only index operations: DGC thread leak)
+- http://trac.bigdata.com/ticket/437 (Thread-local cache combined with unbounded thread pools causes effective memory leak: termCache memory leak & thread-local buffers)
+- http://trac.bigdata.com/ticket/438 (KeyBeforePartitionException on cluster)
+- http://trac.bigdata.com/ticket/439 (Class loader problem)
+- http://trac.bigdata.com/ticket/441 (Ganglia integration)
+- http://trac.bigdata.com/ticket/443 (Logger for RWStore transaction service and recycler)
+- http://trac.bigdata.com/ticket/444 (SPARQL query can fail to notice when IRunningQuery.isDone() on cluster)
+- http://trac.bigdata.com/ticket/445 (RWStore does not track tx release correctly)
+- http://trac.bigdata.com/ticket/446 (HTTP Repostory broken with bigdata 1.1.0)
+- http://trac.bigdata.com/ticket/448 (SPARQL 1.1 UPDATE)
+- http://trac.bigdata.com/ticket/449 (SPARQL 1.1 Federation extension)
+- http://trac.bigdata.com/ticket/451 (Serialization error in SIDs mode on cluster)
+- http://trac.bigdata.com/ticket/454 (Global Row Store Read on Cluster uses Tx)
+- http://trac.bigdata.com/ticket/456 (IExtension implementations do point lookups on lexicon)
+- http://trac.bigdata.com/ticket/457 ("No such index" on cluster under concurrent query workload)
+- http://trac.bigdata.com/ticket/458 (Java level deadlock in DS)
+- http://trac.bigdata.com/ticket/460 (Uncaught interrupt resolving RDF terms)
+- http://trac.bigdata.com/ticket/461 (KeyAfterPartitionException / KeyBeforePartitionException on cluster)
+- http://trac.bigdata.com/ticket/463 (NoSuchVocabularyItem with LUBMVocabulary for DerivedNumericsExtension)
+- http://trac.bigdata.com/ticket/464 (Query statistics do not update correctly on cluster)
+- http://trac.bigdata.com/ticket/465 (Too many GRS reads on cluster)
+- http://trac.bigdata.com/ticket/469 (Sail does not flush assertion buffers before query)
+- http://trac.bigdata.com/ticket/472 (acceptTaskService pool size on cluster)
+- http://trac.bigdata.com/ticket/475 (Optimize serialization for query messages on cluster)
+- http://trac.bigdata.com/ticket/476 (Test suite for writeCheckpoint() and recycling for BTree/HTree)
+- http://trac.bigdata.com/ticket/478 (Cluster does not map input solution(s) across shards)
+- http://trac.bigdata.com/ticket/480 (Error releasing deferred frees using 1.0.6 against a 1.0.4 journal)
+- http://trac.bigdata.com/ticket/481 (PhysicalAddressResolutionException against 1.0.6)
+- http://trac.bigdata.com/ticket/482 (RWStore reset() should be thread-safe for concurrent readers)
+- http://trac.bigdata.com/ticket/484 (Java API for NanoSparqlServer REST API)
+- http://trac.bigdata.com/ticket/491 (AbstractTripleStore.destroy() does not clear the locator cache)
+- http://trac.bigdata.com/ticket/492 (Empty chunk in ThickChunkMessage (cluster))
+- http://trac.bigdata.com/ticket/493 (Virtual Graphs)
+- http://trac.bigdata.com/ticket/496 (Sesame 2.6.3)
+- http://trac.bigdata.com/ticket/497 (Implement STRBEFORE, STRAFTER, and REPLACE)
+- http://trac.bigdata.com/ticket/498 (Bring bigdata RDF/XML parser up to openrdf 2.6.3.)
+- http://trac.bigdata.com/ticket/500 (SPARQL 1.1 Service Description)
+- http://www.openrdf.org/issues/browse/SES-884 (Aggregation with an solution set as input should produce an empty solution as output)
+- http://www.openrdf.org/issues/browse/SES-862 (Incorrect error handling for SPARQL aggregation; fix in 2.6.1)
+- http://www.openrdf.org/issues/browse/SES-873 (Order the same Blank Nodes together in ORDER BY)
+- http://trac.bigdata.com/ticket/501 (SPARQL 1.1 BINDINGS are ignored)
+- http://trac.bigdata.com/ticket/503 (Bigdata2Sesame2BindingSetIterator throws QueryEvaluationException were it should throw NoSuchElementException)
+- http://trac.bigdata.com/ticket/504 (UNION with Empty Group Pattern)
+- http://trac.bigdata.com/ticket/505 (Exception when using SPARQL sort & statement identifiers)
+- http://trac.bigdata.com/ticket/506 (Load, closure and query performance in 1.1.x versus 1.0.x)
+- http://trac.bigdata.com/ticket/508 (LIMIT causes hash join utility to log errors)
+- http://trac.bigdata.com/ticket/513 (Expose the LexiconConfiguration to Function BOPs)
+- http://trac.bigdata.com/ticket/515 (Query with two "FILTER NOT EXISTS" expressions returns no results)
+- http://trac.bigdata.com/ticket/516 (REGEXBOp should cache the Pattern when it is a constant)
+- http://trac.bigdata.com/ticket/517 (Java 7 Compiler Compatibility)
+- http://trac.bigdata.com/ticket/518 (Review function bop subclass hierarchy, optimize datatype bop, etc.)
+- http://trac.bigdata.com/ticket/520 (CONSTRUCT WHERE shortcut)
+- http://trac.bigdata.com/ticket/521 (Incremental materialization of Tuple and Graph query results)
+- http://trac.bigdata.com/ticket/525 (Modify the IChangeLog interface to support multiple agents)
+- http://trac.bigdata.com/ticket/527 (Expose timestamp of LexiconRelation to function bops)
+- http://trac.bigdata.com/ticket/532 (ClassCastException during hash join (can not be cast to TermId))
+- http://trac.bigdata.com/ticket/533 (Review materialization for inline IVs)
+- http://trac.bigdata.com/ticket/534 (BSBM BI Q5 error using MERGE JOIN)
+
+1.1.0 (*)
+
+ - http://trac.bigdata.com/ticket/23 (Lexicon joins)
+ - http://trac.bigdata.com/ticket/109 (Store large literals as "blobs")
+ - http://trac.bigdata.com/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.)
+ - http://trac.bigdata.com/ticket/203 (Implement an persistence capable hash table to support analytic query)
+ - http://trac.bigdata.com/ticket/209 (AccessPath should visit binding sets rather than elements for high level query.)
+ - http://trac.bigdata.com/ticket/227 (SliceOp appears to be necessary when operator plan should suffice without)
+ - http://trac.bigdata.com/ticket/232 (Bottom-up evaluation semantics).
+ - http://trac.bigdata.com/ticket/246 (Derived xsd numeric data types must be inlined as extension types.)
+ - http://trac.bigdata.com/ticket/254 (Revisit pruning of intermediate variable bindings during query execution)
+ - http://trac.bigdata.com/ticket/261 (Lift conditions out of subqueries.)
+ - http://trac.bigdata.com/ticket/300 (Native ORDER BY)
+ - http://trac.bigdata.com/ticket/324 (Inline predeclared URIs and namespaces in 2-3 bytes)
+ - http://trac.bigdata.com/ticket/330 (NanoSparqlServer does not locate "html" resources when run from jar)
+ - http://trac.bigdata.com/ticket/334 (Support inlining of unicode data in the statement indices.)
+ - http://trac.bigdata.com/ticket/364 (Scalable default graph evaluation)
+ - http://trac.bigdata.com/ticket/368 (Prune variable bindings during query evaluation)
+ - http://trac.bigdata.com/ticket/370 (Direct translation of openrdf AST to bigdata AST)
+ - http://trac.bigdata.com/ticket/373 (Fix StrBOp and other IValueExpressions)
+ - http://trac.bigdata.com/ticket/377 (Optimize OPTIONALs with multiple statement patterns.)
+ - http://trac.bigdata.com/ticket/380 (Native SPARQL evaluation on cluster)
+ - http://trac.bigdata.com/ticket/387 (Cluster does not compute closure)
+ - http://trac.bigdata.com/ticket/395 (HTree hash join performance)
+ - http://trac.bigdata.com/ticket/401 (inline xsd:unsigned datatypes)
+ - http://trac.bigdata.com/ticket/408 (xsd:string cast fails for non-numeric data)
+ - http://trac.bigdata.com/ticket/421 (New query hints model.)
+ - http://trac.bigdata.com/ticket/431 (Use of read-only tx per query defeats cache on cluster)
+
+1.0.3
+
+ - http://trac.bigdata.com/ticket/217 (BTreeCounters does not track bytes released)
+ - http://trac.bigdata.com/ticket/269 (Refactor performance counters using accessor interface)
+ - http://trac.bigdata.com/ticket/329 (B+Tree should delete bloom filter when it is disabled.)
+ - http://trac.bigdata.com/ticket/372 (RWStore does not prune the CommitRecordIndex)
+ - http://trac.bigdata.com/ticket/375 (Persistent memory leaks (RWStore/DISK))
+ - http://trac.bigdata.com/ticket/385 (FastRDFValueCoder2: ArrayIndexOutOfBoundsException)
+ - http://trac.bigdata.com/ticket/391 (Release age advanced on WORM mode journal)
+ - http://trac.bigdata.com/ticket/392 (Add a DELETE by access path method to the NanoSparqlServer)
+ - http://trac.bigdata.com/ticket/393 (Add "context-uri" request parameter to specify the default context for INSERT in the REST API)
+ - http://trac.bigdata.com/ticket/394 (log4j configuration error message in WAR deployment)
+ - http://trac.bigdata.com/ticket/399 (Add a fast range count method to the REST API)
+ - http://trac.bigdata.com/ticket/422 (Support temp triple store wrapped by a BigdataSail)
+ - http://trac.bigdata.com/ticket/424 (NQuads support for NanoSparqlServer)
+ - http://trac.bigdata.com/ticket/425 (Bug fix to DEFAULT_RDF_FORMAT for bulk data loader in scale-out)
+ - http://trac.bigdata.com/ticket/426 (Support either lockfile (procmail) and dotlockfile (liblockfile1) in scale-out)
+ - http://trac.bigdata.com/ticket/427 (BigdataSail#getReadOnlyConnection() race condition with concurrent commit)
+ - http://trac.bigdata.com/ticket/435 (Address is 0L)
+ - http://trac.bigdata.com/ticket/436 (TestMROWTransactions failure in CI)
+
+1.0.2
+
+ - http://trac.bigdata.com/ticket/32 (Query time expansion of (foo rdf:type rdfs:Resource) drags in SPORelation for scale-out.)
+ - http://trac.bigdata.com/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.)
+ - http://trac.bigdata.com/ticket/356 (Query not terminated by error.)
+ - http://trac.bigdata.com/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.)
+ - http://trac.bigdata.com/ticket/361 (IRunningQuery not closed promptly.)
+ - http://trac.bigdata.com/ticket/371 (DataLoader fails to load resources available from the classpath.)
+ - http://trac.bigdata.com/ticket/376 (Support for the streaming of bigdata IBindingSets into a sparql query.)
+ - http://trac.bigdata.com/ticket/378 (ClosedByInterruptException during heavy query mix.)
+ - http://trac.bigdata.com/ticket/379 (NotSerializableException for SPOAccessPath.)
+ - http://trac.bigdata.com/ticket/382 (Change dependencies to Apache River 2.2.0)
+
+1.0.1 (*)
+
+ - http://trac.bigdata.com/ticket/107 (Unicode clean schema names in the sparse row store).
+ - http://trac.bigdata.com/ticket/124 (TermIdEncoder should use more bits for scale-out).
+ - http://trac.bigdata.com/ticket/225 (OSX requires specialized performance counter collection classes).
+ - http://trac.bigdata.com/ticket/348 (BigdataValueFactory.asValue() must return new instance when DummyIV is used).
+ - http://trac.bigdata.com/ticket/349 (TermIdEncoder limits Journal to 2B distinct RDF Values per triple/quad store instance).
+ - http://trac.bigdata.com/ticket/351 (SPO not Serializable exception in SIDS mode (scale-out)).
+ - http://trac.bigdata.com/ticket/352 (ClassCastException when querying with binding-values that are not known to the database).
+ - http://trac.bigdata.com/ticket/353 (UnsupportedOperatorException for some SPARQL queries).
+ - http://trac.bigdata.com/ticket/355 (Query failure when comparing with non materialized value).
+ - http://trac.bigdata.com/ticket/357 (RWStore reports "FixedAllocator returning null address, with freeBits".)
+ - http://trac.bigdata.com/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.)
+ - http://trac.bigdata.com/ticket/362 (log4j - slf4j bridge.)
+
+For more information about bigdata(R), please see the following links:
+
+[1] http://wiki.bigdata.com/wiki/index.php/Main_Page
+[2] http://wiki.bigdata.com/wiki/index.php/GettingStarted
+[3] http://wiki.bigdata.com/wiki/index.php/Roadmap
+[4] http://www.bigdata.com/bigdata/docs/api/
+[5] http://sourceforge.net/projects/bigdata/
+[6] http://www.bigdata.com/blog
+[7] http://www.systap.com/bigdata.htm
+[8] http://sourceforge.net/projects/bigdata/files/bigdata/
+[9] http://wiki.bigdata.com/wiki/index.php/DataMigration
+[10] http://wiki.bigdata.com/wiki/index.php/HAJournalServer
+[11] http://www.bigdata.com/whitepapers/reifSPARQL.pdf
+[12] http://wiki.bigdata.com/wiki/index.php/RDF_GAS_API
+
+About bigdata:
+
+Bigdata(R) is a horizontally-scaled, general purpose storage and computing fabric for ordered data (B+Trees), designed to operate on either a single server or a cluster of commodity hardware. Bigdata(R) uses dynamically partitioned key-range shards in order to remove any realistic scaling limits - in principle, bigdata(R) may be deployed on 10s, 100s, or even thousands of machines and new capacity may be added incrementally without requiring the full reload of all data. The bigdata(R) RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), and datum level provenance.
From: <jer...@us...> - 2014-09-23 20:09:29
|
Revision: 8657 http://sourceforge.net/p/bigdata/code/8657 Author: jeremy_carroll Date: 2014-09-23 20:09:20 +0000 (Tue, 23 Sep 2014) Log Message: ----------- Fix? for trac1015. Using YourKit's "Advanced Object Generation Number" and "Capture Memory Snapshot" features with a dummy test suite of thousands of ExampleProtocolTest instances, I reduced the observable memory leakage from 150k/s to 15k/s. I am hoping this is good enough to unblock the CI. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java 2014-09-22 14:54:35 UTC (rev 8656) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java 2014-09-23 20:09:20 UTC (rev 8657) @@ -129,6 +129,12 @@ client = new DefaultHttpClient(DefaultClientConnectionManagerFactory.getInstance().newInstance()); resetDefaultOptions(); } + @Override + public void tearDown() throws Exception { + client.getConnectionManager().shutdown(); + client = null; + super.tearDown(); + } /** * This method is called automatically after each call to {@link #serviceRequest(String...)} * so probably is unnecessary. |
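The pattern behind this fix generalizes to any test suite that allocates a pooled HTTP client per test case. A minimal sketch, assuming JUnit 3 conventions (as in AbstractProtocolTest) and Apache HttpClient 4.1.x; the class name HttpClientTestBase is hypothetical:

import junit.framework.TestCase;

import org.apache.http.impl.client.DefaultHttpClient;

public abstract class HttpClientTestBase extends TestCase {

    protected DefaultHttpClient client;

    @Override
    public void setUp() throws Exception {
        super.setUp();
        client = new DefaultHttpClient();
    }

    @Override
    public void tearDown() throws Exception {
        if (client != null) {
            // Release pooled connections, sockets, and worker threads.
            client.getConnectionManager().shutdown();
            // JUnit retains TestCase instances for the entire run, so a
            // non-null field would pin the client until the JVM exits.
            client = null;
        }
        super.tearDown();
    }

}

Shutting down the connection manager releases native resources immediately, while nulling the field breaks the reference chain from the retained TestCase instance; both are needed to stop the leak.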
From: <tho...@us...> - 2014-09-22 14:54:42
|
Revision: 8656 http://sourceforge.net/p/bigdata/code/8656 Author: thompsonbry Date: 2014-09-22 14:54:35 +0000 (Mon, 22 Sep 2014) Log Message: ----------- I have integrated a high concurrency LRU into the QueryEngine. This is used to track the UUID of an operation that has been CANCELed through the REST API but which is neither (a) currently running; nor (b) recently done. In this case, the UUID is entered into the new "pendingCancelLRU". The pendingCancelLRU is checked before we start a new query (all code paths) and before we start a SPARQL UPDATE operation (REST API). If the operation is found in the pendingCancelLRU, then the Future of that operation is cancelled. This occurs before the Future is submitted for evaluation to avoid a race in which the operation might complete before it was cancelled. Changes are to: - QueryEngine: added pendingCancelLRU and associated access methods. The pendingCancelLRU is checked for QUERY in startEval(). - BigdataRDFContext: Handle pending CANCEL of UPDATE requests. - StatusServlet: Queue UUID in the pendingCancelLRU iff not found when CANCEL request is processed. The AST, SPARQL, and NSS test suites are good locally (but see #1015 for memory leaks in the test suite). Committed to CI. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2014-09-19 12:26:56 UTC (rev 8655) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2014-09-22 14:54:35 UTC (rev 8656) @@ -64,6 +64,7 @@ import com.bigdata.btree.BTree; import com.bigdata.btree.IndexSegment; import com.bigdata.btree.view.FusedView; +import com.bigdata.cache.ConcurrentWeakValueCache; import com.bigdata.concurrent.FutureTaskMon; import com.bigdata.counters.CounterSet; import com.bigdata.counters.ICounterSetAccess; @@ -71,6 +72,7 @@ import com.bigdata.journal.IIndexManager; import com.bigdata.journal.Journal; import com.bigdata.rawstore.IRawStore; +import com.bigdata.rdf.internal.constraints.TrueBOp; import com.bigdata.rdf.sail.webapp.client.DefaultClientConnectionManagerFactory; import com.bigdata.resources.IndexManager; import com.bigdata.service.IBigdataFederation; @@ -535,7 +537,7 @@ /** * The currently executing queries. */ - final private ConcurrentHashMap<UUID/* queryId */, AbstractRunningQuery> runningQueries = new ConcurrentHashMap<UUID, AbstractRunningQuery>(); + private final ConcurrentHashMap<UUID/* queryId */, AbstractRunningQuery> runningQueries = new ConcurrentHashMap<UUID, AbstractRunningQuery>(); /** * LRU cache used to handle problems with asynchronous termination of @@ -554,7 +556,7 @@ * enough that we can not have a false cache miss on a system which is * heavily loaded by a bunch of light queries. 
*/ - private LinkedHashMap<UUID, IHaltable<Void>> doneQueries = new LinkedHashMap<UUID,IHaltable<Void>>( + private final LinkedHashMap<UUID, IHaltable<Void>> doneQueries = new LinkedHashMap<UUID,IHaltable<Void>>( 16/* initialCapacity */, .75f/* loadFactor */, true/* accessOrder */) { private static final long serialVersionUID = 1L; @@ -568,6 +570,92 @@ }; /** + * A high concurrency cache operating as an LRU designed to close a data + * race between the asynchronous start of a submitted query or update + * operation and the explicit asynchronous CANCEL of that operation using + * its pre-assigned {@link UUID}. + * <p> + * When a CANCEL request is received, we probe both the + * {@link #runningQueries} and the {@link #doneQueries}. If no operation is + * associated with that request, then we probe the running UPDATE + * operations. Finally, if no such operation was discovered, then the + * {@link UUID} of the operation to be cancelled is entered into this + * collection. + * <p> + * Before a query starts, we consult the {@link #pendingCancelLRU}. If the + * {@link UUID} of the query is discovered, then the query is cancelled + * rather than run. + * <p> + * Note: The capacity of the backing hard reference queue is quite small. + * {@link UUID}s are only entered into this collection if a CANCEL request + * is asynchronously received either (a) before; or (b) long enough after a + * query or update is executed that it is not found in either the running + * queries map or the recently done queries map. + * + * TODO There are some cases that are not covered by this. First, we do not + * have {@link UUID}s for all REST API methods and thus they can not all be + * cancelled. If we allowed an HTTP header to specify the UUID of the + * request, then we could associate a UUID with all requests. The ongoing + * refactor to support clean interrupt of NSS requests (#753) and the + * ongoing refactor to support concurrent unisolated operations against the + * same journal (#566) will provide us with the mechanisms to identify all + * such operations so we can check their assigned UUIDs and cancel them when + * requested. + * + * @see <a href="http://trac.bigdata.com/ticket/899"> REST API Query + * Cancellation </a> + * @see <a href="http://trac.bigdata.com/ticket/753"> HA doLocalAbort() + * should interrupt NSS requests and AbstractTasks </a> + * @see <a href="http://trac.bigdata.com/ticket/566"> Concurrent unisolated + * operations against multiple KBs on the same Journal </a> + * @see #startEval(UUID, PipelineOp, Map, IChunkMessage) + */ + private final ConcurrentWeakValueCache<UUID, UUID> pendingCancelLRU = new ConcurrentWeakValueCache<>( + 50/* queueCapacity (SWAG, but see above) */); + + /** + * Add a query {@link UUID} to the LRU of query identifiers for which we + * have received a CANCEL request, but were unable to find a running QUERY, + * recently done query, or running UPDATE request. + * + * @param queryId + * The UUID of the operation to be cancelled. + * + * @see <a href="http://trac.bigdata.com/ticket/899"> REST API Query + * Cancellation </a> + */ + public void addPendingCancel(final UUID queryId) { + + if (queryId == null) + throw new IllegalArgumentException(); + + pendingCancelLRU.putIfAbsent(queryId, queryId); + + } + + /** + * Return <code>true</code> iff the {@link UUID} is in the collection of + * {@link UUID}s for which we have already received a CANCEL request. + * <p> + * Note: The {@link UUID} is removed from the pending cancel collection as a + * side-effect.
+ * + * @param queryId + * The {@link UUID} of the operation. + * + * @return <code>true</code> if that operation has already been marked for + * cancellation. + */ + public boolean pendingCancel(final UUID queryId) { + + if (queryId == null) + throw new IllegalArgumentException(); + + return pendingCancelLRU.remove(queryId) != null; + + } + + /** * A queue of {@link ChunkedRunningQuery}s having binding set chunks available for * consumption. * @@ -1695,6 +1783,22 @@ // if (c != null) // c.startCount.increment(); + if (pendingCancelLRU.containsKey(runningQuery.getQueryId())) { + /* + * The query was asynchronously scheduled for cancellation. + */ + + // Cancel the query. + runningQuery.cancel(true/* mayInterruptIfRunning */); + + // Remove from the CANCEL LRU. + pendingCancelLRU.remove(runningQuery.getQueryId()); + + // Return the query. It has already been cancelled. + return runningQuery; + + } + // notify query start runningQuery.startQuery(msg); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-09-19 12:26:56 UTC (rev 8655) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-09-22 14:54:35 UTC (rev 8656) @@ -75,6 +75,7 @@ import com.bigdata.BigdataStatics; import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.bop.fed.QueryEngineFactory; import com.bigdata.counters.CAT; import com.bigdata.io.NullOutputStream; import com.bigdata.journal.IIndexManager; @@ -1015,6 +1016,30 @@ m_queries.put(queryId, r); m_queries2.put(queryId2, r); + /** + * Handle data races in CANCEL of an UPDATE operation whose + * cancellation was requested before it began to execute. + * + * @see <a href="http://trac.bigdata.com/ticket/899"> REST API Query + * Cancellation </a> + */ + { + + final QueryEngine queryEngine = QueryEngineFactory + .getQueryController(getIndexManager()); + + if (queryEngine.pendingCancel(queryId2)) { + + /* + * There is a pending CANCEL for this UPDATE request, so + * cancel it now. + */ + updateFuture.cancel(true/* mayInterruptIfRunning */); + + } + + } + return update; } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-09-19 12:26:56 UTC (rev 8655) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-09-22 14:54:35 UTC (rev 8656) @@ -234,17 +234,11 @@ * * @throws IOException * + * @see <a href="http://trac.bigdata.com/ticket/899"> REST API Query + * Cancellation </a> + * * FIXME GROUP COMMIT: Review cancellation and leader fail * scenarios. - * - * FIXME CANCEL: A remote client can not act to cancel a request - * that is in the queue until it begins to execute. This is - * because the UUID is not assigned until the request begins to - * execute. This is true for both SPARQL QUERY and SPARQL UPDATE - * requests. We need to track the CANCEL requests on a LRU and - * apply them if we observe the query / update arriving after - * the CANCEL. 
See <a href="http://trac.bigdata.com/ticket/988" - * > REST API cancellation of queries</a> */ static void doCancelQuery(final HttpServletRequest req, final HttpServletResponse resp, final IIndexManager indexManager, @@ -276,6 +270,7 @@ if (!tryCancelQuery(queryEngine, queryId)) { if (!tryCancelUpdate(context, queryId)) { + queryEngine.addPendingCancel(queryId); if (log.isInfoEnabled()) { log.info("No such QUERY or UPDATE: " + queryId); } |
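The essence of the race fix above can be captured in a few lines. The following is an illustrative sketch only, not the production code: it substitutes a bounded, synchronized LinkedHashMap for the ConcurrentWeakValueCache used by QueryEngine, and the class name PendingCancelRegistry is invented for the example.

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

class PendingCancelRegistry {

    /** Bounded, thread-safe LRU of operations cancelled before they were observed. */
    private final Set<UUID> pending = Collections.newSetFromMap(
            Collections.synchronizedMap(new LinkedHashMap<UUID, Boolean>(
                    16/* initialCapacity */, .75f/* loadFactor */, true/* accessOrder */) {
                private static final long serialVersionUID = 1L;
                @Override
                protected boolean removeEldestEntry(final Map.Entry<UUID, Boolean> eldest) {
                    return size() > 50; // small capacity, per the javadoc above
                }
            }));

    /** CANCEL matched no running or recently done operation: park its UUID. */
    void addPendingCancel(final UUID queryId) {
        pending.add(queryId);
    }

    /**
     * Consulted before an operation's Future is submitted. Returns true iff
     * the operation was cancelled before it started; the UUID is removed as
     * a side effect, mirroring QueryEngine.pendingCancel(UUID).
     */
    boolean pendingCancel(final UUID queryId) {
        return pending.remove(queryId);
    }

}

The ordering is what closes the race: the engine checks pendingCancel() after the operation is registered but before its Future runs, so a CANCEL can no longer fall into the gap between submission and execution.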
From: <tho...@us...> - 2014-09-19 12:27:04
|
Revision: 8655 http://sourceforge.net/p/bigdata/code/8655 Author: thompsonbry Date: 2014-09-19 12:26:56 +0000 (Fri, 19 Sep 2014) Log Message: ----------- This addresses a security vulnerability with the commons-fileupload component. I have run the NSS and BigdataSailWithQuads test suites locally. Committing to CI for broader validation. See #1010 (Update apache http commons-fileupload). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/.classpath branches/BIGDATA_RELEASE_1_3_0/build.properties branches/BIGDATA_RELEASE_1_3_0/pom.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-09-18 10:34:41 UTC (rev 8654) +++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-09-19 12:26:56 UTC (rev 8655) @@ -73,7 +73,6 @@ <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/httpclient-cache-4.1.3.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/httpcore-4.1.4.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/httpmime-4.1.3.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-io-2.1.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/apache/log4j-1.2.17.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar" sourcepath="/Users/bryan/Documents/workspace/org.openrdf.sesame-2.6.10"/> @@ -99,5 +98,6 @@ <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-test-2.5.0.jar"/> <classpathentry kind="lib" path="bigdata-blueprints/lib/rexster-core-2.5.0.jar"/> <classpathentry kind="lib" path="bigdata-blueprints/lib/commons-configuration-1.10.jar"/> + <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar"/> <classpathentry kind="output" path="bin"/> </classpath> Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar =================================================================== (Binary files differ) Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar 2014-09-18 10:34:41 UTC (rev 8654) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar 2014-09-19 12:26:56 UTC (rev 8655) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties =================================================================== ---
branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-09-18 10:34:41 UTC (rev 8654) +++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-09-19 12:26:56 UTC (rev 8655) @@ -55,7 +55,7 @@ servlet.version=3.1.0 lucene.version=3.0.0 apache.commons_codec.version=1.4 -apache.commons_fileupload.version=1.2.2 +apache.commons_fileupload.version=1.3.1 apache.commons_io.version=2.1 apache.commons_logging.version=1.1.1 apache.httpclient.version=4.1.3 Modified: branches/BIGDATA_RELEASE_1_3_0/pom.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-09-18 10:34:41 UTC (rev 8654) +++ branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-09-19 12:26:56 UTC (rev 8655) @@ -83,7 +83,7 @@ <servlet.version>3.1.0</servlet.version> <lucene.version>3.0.0</lucene.version> <apache.commons_codec.version>1.4</apache.commons_codec.version> - <apache.commons_fileupload.version>1.2.2</apache.commons_fileupload.version> + <apache.commons_fileupload.version>1.3.1</apache.commons_fileupload.version> <apache.commons_io.version>2.1</apache.commons_io.version> <apache.commons_logging.version>1.1.1</apache.commons_logging.version> <apache.httpclient.version>4.1.3</apache.httpclient.version> |
From: <mar...@us...> - 2014-09-18 10:34:44
|
Revision: 8654 http://sourceforge.net/p/bigdata/code/8654 Author: martyncutcher Date: 2014-09-18 10:34:41 +0000 (Thu, 18 Sep 2014) Log Message: ----------- Add first branch-notes prior to any code changes Added Paths: ----------- branches/JETTY_HTTPCLIENT/branch-notes.txt Added: branches/JETTY_HTTPCLIENT/branch-notes.txt =================================================================== --- branches/JETTY_HTTPCLIENT/branch-notes.txt (rev 0) +++ branches/JETTY_HTTPCLIENT/branch-notes.txt 2014-09-18 10:34:41 UTC (rev 8654) @@ -0,0 +1,3 @@ +This notes file is created as an additional aid to monitor code changes. + +It is originally committed on branch creation before any other code changes, so comparisons can be made with its first commit revision to help in code comparisons. |
From: <mar...@us...> - 2014-09-18 10:31:08
|
Revision: 8653 http://sourceforge.net/p/bigdata/code/8653 Author: martyncutcher Date: 2014-09-18 10:31:04 +0000 (Thu, 18 Sep 2014) Log Message: ----------- Branch to implement Jetty HttpClient refactor Added Paths: ----------- branches/JETTY_HTTPCLIENT/ |
From: <tho...@us...> - 2014-09-16 18:11:41
|
Revision: 8652 http://sourceforge.net/p/bigdata/code/8652 Author: thompsonbry Date: 2014-09-16 18:11:37 +0000 (Tue, 16 Sep 2014) Log Message: ----------- javadoc Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2014-09-16 18:11:20 UTC (rev 8651) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2014-09-16 18:11:37 UTC (rev 8652) @@ -56,6 +56,10 @@ * Message issued when preparing for a commit. The next message will be * either {@link #transactionCommited(long)} or * {@link #transactionAborted()}. + * <p> + * Note: The listener will have observed all updates by the time this + * message is generated. Thus, this message can be used to validate + * post-conditions for the transaction. */ void transactionPrepare(); |
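The new paragraph suggests a concrete use for transactionPrepare(): validate application-level invariants once all change events have been delivered. A hypothetical sketch follows; it mirrors only the callbacks visible in this diff rather than the full IChangeLog interface, and the balance check itself is invented for illustration:

import java.util.concurrent.atomic.AtomicLong;

class BalanceCheckingChangeLog {

    private final AtomicLong adds = new AtomicLong();
    private final AtomicLong removes = new AtomicLong();

    /** Invoked once for each statement added or removed in the transaction. */
    void changeEvent(final boolean isAdd) {
        (isAdd ? adds : removes).incrementAndGet();
    }

    /**
     * All updates have been observed by this point (per the javadoc above),
     * so post-conditions can be checked here. Throwing is one way to flag a
     * violation; whether that aborts the commit depends on how the caller
     * handles listener exceptions.
     */
    void transactionPrepare() {
        if (removes.get() > adds.get())
            throw new IllegalStateException("post-condition violated: net statement loss");
    }

    void transactionCommited(final long commitTime) {
        adds.set(0);
        removes.set(0);
    }

    void transactionAborted() {
        adds.set(0);
        removes.set(0);
    }

}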
From: <tho...@us...> - 2014-09-16 18:11:32
|
Revision: 8651 http://sourceforge.net/p/bigdata/code/8651 Author: thompsonbry Date: 2014-09-16 18:11:20 +0000 (Tue, 16 Sep 2014) Log Message: ----------- Marked directed edge on graphic. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/data/ssspGraph.png Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/test/com/bigdata/rdf/graph/data/ssspGraph.png =================================================================== (Binary files differ) |
From: <tho...@us...> - 2014-09-16 15:46:16
|
Revision: 8650 http://sourceforge.net/p/bigdata/code/8650 Author: thompsonbry Date: 2014-09-16 15:46:10 +0000 (Tue, 16 Sep 2014) Log Message: ----------- @Override annotation Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2014-09-11 19:56:32 UTC (rev 8649) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2014-09-16 15:46:10 UTC (rev 8650) @@ -702,6 +702,7 @@ * <p> * {@inheritDoc} */ + @Override public void cancelQuery(final UUID queryId, final Throwable cause) { // lookup query by id. |
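A one-annotation commit, but not a cosmetic one: @Override makes the compiler verify that the method actually overrides something, so later signature drift in the interface fails the build instead of silently leaving an orphaned overload. A small illustration with invented names:

import java.util.UUID;

interface QueryController {
    void cancelQuery(UUID queryId, Throwable cause);
}

class Engine implements QueryController {

    @Override // OK: matches the interface method exactly.
    public void cancelQuery(final UUID queryId, final Throwable cause) {
        // ...
    }

    // Adding @Override to this method would be a compile error: the first
    // parameter type differs, so it is an overload, not an override.
    public void cancelQuery(final String queryId, final Throwable cause) {
        // ...
    }

}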
From: <tho...@us...> - 2014-09-11 19:56:47
|
Revision: 8649 http://sourceforge.net/p/bigdata/code/8649 Author: thompsonbry Date: 2014-09-11 19:56:32 +0000 (Thu, 11 Sep 2014) Log Message: ----------- Merge in change set from the main development branch. Also, reduced log@WARN => log@INFO in CreateKBTask. @see #714 (openrdf 2.7) Modified Paths: -------------- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractBTree.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractNode.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/BTree.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/htree/AbstractHTree.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/htree/DefaultEvictionListener.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/htree/HTree.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/AbstractCommitTimeIndex.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/ICommitter.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/service/EventReceiver.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/service/LoadBalancerService.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/stream/Stream.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataQueryProjection.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/CreateKBTask.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataArbitraryLengthPathTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java Added Paths: ----------- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/EvictionError.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IReadWriteLockManager.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IndexInconsistentError.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialEdge.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialElement.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialVertex.java Property Changed: ---------------- branches/SESAME_2_7/ Index: branches/SESAME_2_7 =================================================================== --- branches/SESAME_2_7 2014-09-11 19:26:33 UTC (rev 8648) +++ branches/SESAME_2_7 2014-09-11 19:56:32 UTC (rev 8649) Property changes on: branches/SESAME_2_7 ___________________________________________________________________ Modified: svn:mergeinfo ## -1,6 +1,7 ## /branches/BIGDATA_MGC_HA1_HA5:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0:8636-8648 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 \ No newline at end of property Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractBTree.java =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractBTree.java 2014-09-11 
19:26:33 UTC (rev 8648) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractBTree.java 2014-09-11 19:56:32 UTC (rev 8649) @@ -24,6 +24,7 @@ /* * Created on Dec 19, 2006 * + * RESYNC */ package com.bigdata.btree; @@ -40,6 +41,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.FutureTask; +import java.util.concurrent.locks.Lock; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -133,7 +135,6 @@ * </p> * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * * @see KeyBuilder */ @@ -168,6 +169,15 @@ final protected static String ERROR_TRANSIENT = "Transient"; /** + * An unisolated index view is in an error state. It must be discarded and + * reloaded from the current checkpoint record. + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate BTree + * objects if error occurs during eviction </a> + */ + final protected static String ERROR_ERROR_STATE = "Index is in error state"; + + /** * Log for btree opeations. */ protected static final Logger log = Logger.getLogger(AbstractBTree.class); @@ -250,7 +260,7 @@ * mutation. */ final protected boolean readOnly; - + /** * Optional cache for {@link INodeData} and {@link ILeafData} instances and * always <code>null</code> if the B+Tree is transient. @@ -258,6 +268,14 @@ protected final ILRUCache<Long, Object> storeCache; /** + * Hard reference iff the index is mutable (aka unisolated) allows us to + * avoid patterns that create short life time versions of the object to + * protect {@link ICheckpointProtocol#writeCheckpoint2()} and similar + * operations. + */ + private final IReadWriteLockManager lockManager; + + /** * The branching factor for the btree. */ final protected int branchingFactor; @@ -547,6 +565,21 @@ protected volatile AbstractNode<?> root; /** + * This field is set if an error is encountered that renders an unisolated + * index object unusable. For example, this can occur if an error was + * detected during incremental eviction of dirty nodes for a mutable index + * view since that means that there are partly serialized (and possibly + * inconsistenly serialized) evicted pages. Once this becomes non- + * <code>null</code> the index MUST be reloaded from the most recent + * checkpoint before it can be used (that is, you need to obtain a new view + * of the unisolated index since this field is sticky once set). + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate BTree + * objects if error occurs during eviction </a> + */ + protected volatile Throwable error; + + /** * An optional bloom filter that will be used to filter point tests against * <i>this</i> {@link AbstractBTree}. A bloom filter provides a strong * guarantee when it reports that a key was not found, but only a weak @@ -960,7 +993,7 @@ this.store = store; this.readOnly = readOnly; - + // /* // * The Memoizer is not used by the mutable B+Tree since it is not safe // * for concurrent operations. 
@@ -1042,6 +1075,8 @@ } + lockManager = ReadWriteLockManager.getLockManager(this); + } /** @@ -1977,7 +2012,8 @@ } }; - + + @Override final public Object insert(Object key, Object value) { key = metadata.getTupleSerializer().serializeKey(key); @@ -1999,6 +2035,7 @@ } + @Override final public byte[] insert(final byte[] key, final byte[] value) { if (key == null) @@ -2120,6 +2157,7 @@ } + @Override final public Object remove(Object key) { key = metadata.getTupleSerializer().serializeKey(key); @@ -2147,6 +2185,7 @@ * Remove the tuple under that key (will write a delete marker if delete * markers are enabled). */ + @Override final public byte[] remove(final byte[] key) { final Tuple tuple; @@ -2232,8 +2271,10 @@ * and dropping indices vs removing the entries in an individual * {@link AbstractBTree}. */ + @Override abstract public void removeAll(); + @Override public Object lookup(Object key) { key = metadata.getTupleSerializer().serializeKey(key); @@ -2252,6 +2293,7 @@ } + @Override public byte[] lookup(final byte[] key) { final Tuple tuple = lookup(key, getLookupTuple()); @@ -2339,6 +2381,7 @@ } + @Override public boolean contains(Object key) { key = metadata.getTupleSerializer().serializeKey(key); @@ -2359,6 +2402,7 @@ * * @todo add unit test to btree suite w/ and w/o delete markers. */ + @Override public boolean contains(final byte[] key) { if (key == null) @@ -2405,6 +2449,7 @@ } + @Override public long indexOf(final byte[] key) { if (key == null) @@ -2419,6 +2464,7 @@ } + @Override public byte[] keyAt(final long index) { if (index < 0) @@ -2433,6 +2479,7 @@ } + @Override public byte[] valueAt(final long index) { final Tuple tuple = getLookupTuple(); @@ -2466,6 +2513,7 @@ * IRangeQuery */ + @Override final public long rangeCountExact(final byte[] fromKey, final byte[] toKey) { if (!metadata.getDeleteMarkers()) { @@ -2509,6 +2557,7 @@ } + @Override final public long rangeCount() { return rangeCount(null, null); @@ -2546,6 +2595,7 @@ * lookup of the both keys. If both keys are <code>null</code>, then the * cost is zero (no IOs). */ + @Override final public long rangeCount(final byte[] fromKey, final byte[] toKey) { if (fromKey == null && toKey == null) { @@ -2604,6 +2654,7 @@ * considering all sources at once. It uses a range iterator scan visiting * both deleted and undeleted tuples for that. */ + @Override public long rangeCountExactWithDeleted(final byte[] fromKey, final byte[] toKey) { @@ -2649,6 +2700,7 @@ } + @Override final public ITupleIterator rangeIterator() { return rangeIterator(null, null); @@ -2675,6 +2727,7 @@ } + @Override final public ITupleIterator rangeIterator(byte[] fromKey, byte[] toKey) { return rangeIterator(fromKey, toKey, 0/* capacity */, @@ -2767,6 +2820,7 @@ * @todo add support to the iterator construct for filtering by a tuple * revision timestamp range. */ + @Override public ITupleIterator rangeIterator(// final byte[] fromKey,// final byte[] toKey,// @@ -3115,6 +3169,7 @@ } + @Override public Object submit(final byte[] key, final ISimpleIndexProcedure proc) { // conditional range check on the key. 
@@ -3126,6 +3181,7 @@ } @SuppressWarnings("unchecked") + @Override public void submit(final byte[] fromKey, final byte[] toKey, final IKeyRangeIndexProcedure proc, final IResultHandler handler) { @@ -3147,6 +3203,7 @@ } @SuppressWarnings("unchecked") + @Override public void submit(final int fromIndex, final int toIndex, final byte[][] keys, final byte[][] vals, final AbstractKeyArrayIndexProcedureConstructor ctor, @@ -3369,8 +3426,68 @@ } - doSyncTouch(node); + /** + * At this point we know that the B+Tree object is a mutable data + * structure (!readOnly). If we can prove that the current thread is + * conducting a read-only operation on the B+Tree, then we DO NOT touch + * the node in order to prevent having read-only operations drive + * evictions. This test relies on the UnisolatedReadWriteIndex class to + * provide concurrency control for such interleaved read-only and + * mutation operations on an unisolated (aka mutable) index. + * + * There are three broad ways in which concurrency controls for the + * index classes are realized: + * + * (1) Explicit synchronization. For example, the AbstractJournal uses + * explicit synchronization to protect operations on the unisolated + * Name2Addr. + * + * (2) Explicit pre-declaration of ordered locks. The ConcurrencyManager + * and AbstractTask support this protection mechanism. The task runs + * once it has acquired the locks for the declared unisolated indices. + * + * (3) UnisolatedReadWriteIndex. This is used to provide transparent + * concurrency control for unisolated indices for the triple and quad + * store classes. + * + * The index is mutable (unisolated view). If the thread owns a + * read-only lock then the operation is read-only and we MUST NOT drive + * evictions from this thread. + * + * Note: The order in which we obtain the real read lock and increment + * (and decrement) the per-thread read lock counter on the AbstractBTree + * is not critical because AbstractBTree.touch() relies on the thread + * both owning the read lock and having the per-thread read lock counter + * incremented for that thread. + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: + * Child does not have persistent identity </a> + */ + final int rcount = lockManager.getReadLockCount(); + + if (rcount > 0) { + + /* + * The current thread is executing a read-only operation against the + * mutable index view. DO NOT TOUCH THE EVICTION QUEUE. + */ + + // NOP + + } else { + /* + * The current thread has not promised that it is using a read-only + * operation. Either the operation is a mutation or the index is + * being managed by one of the other two concurrency control + * patterns. In any of these cases, we touch the write retention + * queue for this node reference. + */ + + doSyncTouch(node); + + } + } /** @@ -3433,7 +3550,7 @@ */ // assert isReadOnly() || ndistinctOnWriteRetentionQueue > 0; - + node.referenceCount++; if (!writeRetentionQueue.add(node)) { @@ -3598,6 +3715,15 @@ } +// private void badNode(final AbstractNode<?> node) { +//// try { +//// Thread.sleep(50); +//// } catch (InterruptedException e) { +//// // ignore; +//// } +// throw new AssertionError("ReadOnly and identity: " + node.identity); +// } + /** * Codes the node and writes the coded record on the store (non-recursive). * The node MUST be dirty. If the node has a parent, then the parent is @@ -3617,7 +3743,10 @@ * @return The persistent identity assigned by the store. 
*/ protected long writeNodeOrLeaf(final AbstractNode<?> node) { - + + if (error != null) + throw new IllegalStateException(ERROR_ERROR_STATE, error); + assert root != null; // i.e., isOpen(). assert node != null; assert node.btree == this; @@ -3641,6 +3770,9 @@ * TestMROWTransactions might also demonstrate an issue * occasionally. If so, then check for the same root cause. */ +// if (node.isReadOnly()) { +// badNode(node); // supports debugging +// } assert !node.isReadOnly(); assertNotReadOnly(); @@ -3741,6 +3873,14 @@ // No longer dirty (prevents re-coding on re-eviction). node.setDirty(false); +// if (node.writing == null) { +// log.warn("Concurrent modification of thread guard", new RuntimeException("WTF2: " + node.hashCode())); +// +// throw new AssertionError("Concurrent modification of thread guard"); +// } + +// node.writing = null; + return 0L; } @@ -3768,7 +3908,7 @@ btreeCounters.bytesWritten += nbytes; - btreeCounters.bytesOnStore_nodesAndLeaves.addAndGet(nbytes); + btreeCounters.bytesOnStore_nodesAndLeaves.addAndGet(nbytes); } @@ -3830,6 +3970,14 @@ } +// if (node.writing == null) { +// log.warn("Concurrent modification of thread guard", new RuntimeException("WTF2: " + node.hashCode())); +// +// throw new AssertionError("Concurrent modification of thread guard"); +// } +// +// node.writing = null; + return addr; } @@ -3856,40 +4004,6 @@ if (addr == IRawStore.NULL) throw new IllegalArgumentException(); -// final Long addr2 = Long.valueOf(addr); -// -// if (storeCache != null) { -// -// // test cache : will touch global LRU iff found. -// final IAbstractNodeData data = (IAbstractNodeData) storeCache -// .get(addr); -// -// if (data != null) { -// -// // Node and Leaf MUST NOT make it into the global LRU or store -// // cache! -// assert !(data instanceof AbstractNode<?>); -// -// final AbstractNode<?> node; -// -// if (data.isLeaf()) { -// -// node = nodeSer.nodeFactory.allocLeaf(this, addr, -// (ILeafData) data); -// -// } else { -// -// node = nodeSer.nodeFactory.allocNode(this, addr, -// (INodeData) data); -// -// } -// -// // cache hit. -// return node; -// -// } -// -// } final ByteBuffer tmp; { @@ -3946,21 +4060,6 @@ } -// if (storeCache != null) { -// -// // update cache : will touch global LRU iff cache is modified. -// final IAbstractNodeData data2 = (IAbstractNodeData) storeCache -// .putIfAbsent(addr2, data); -// -// if (data2 != null) { -// -// // concurrent insert, use winner's value. -// data = data2; -// -// } -// -// } - // wrap as Node or Leaf. final AbstractNode<?> node = nodeSer.wrap(this, addr, data); @@ -4061,6 +4160,7 @@ /** * Returns the hard reference. */ + @Override public T get() { return ref; @@ -4070,6 +4170,7 @@ /** * Overridden as a NOP. 
*/ + @Override public void clear() { // NOP @@ -4163,7 +4264,7 @@ */ int getMaxRecLen() { - return metadata.getMaxRecLen(); + return metadata.getMaxRecLen(); } @@ -4304,4 +4405,24 @@ } + @Override + final public Lock readLock() { + + return lockManager.readLock(); + + } + + @Override + final public Lock writeLock() { + + return lockManager.writeLock(); + + } + + @Override + final public int getReadLockCount() { + + return lockManager.getReadLockCount(); + } + } Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractNode.java =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractNode.java 2014-09-11 19:26:33 UTC (rev 8648) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractNode.java 2014-09-11 19:56:32 UTC (rev 8649) @@ -23,7 +23,6 @@ */ /* * Created on Nov 15, 2006 - * */ package com.bigdata.btree; @@ -37,7 +36,6 @@ import com.bigdata.btree.data.IAbstractNodeData; import com.bigdata.btree.data.IKeysData; -import com.bigdata.btree.data.ISpannedTupleCountData; import com.bigdata.btree.filter.EmptyTupleIterator; import com.bigdata.btree.raba.IRaba; import com.bigdata.btree.raba.MutableKeyBuffer; @@ -51,7 +49,6 @@ * Abstract node supporting incremental persistence and copy-on-write semantics. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public abstract class AbstractNode<T extends AbstractNode /* @@ -539,7 +536,7 @@ parent = (Node) parent.copyOnWrite(oldId); } - + /* * Replace the reference to this child with the reference to the * new child. This makes the old child inaccessible via Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/BTree.java =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/BTree.java 2014-09-11 19:26:33 UTC (rev 8648) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/BTree.java 2014-09-11 19:56:32 UTC (rev 8649) @@ -23,7 +23,6 @@ */ /* * Created on Nov 15, 2006 - * */ package com.bigdata.btree; @@ -288,7 +287,7 @@ * and otherwise <code>null</code>. */ private final ByteArrayBuffer recordAddrBuf; - + // /** // * The last address from which the {@link IndexMetadata} record was read or // * on which it was written. @@ -385,7 +384,7 @@ */ recordAddrBuf = readOnly ? null : new ByteArrayBuffer(Bytes.SIZEOF_LONG); - + } /** @@ -900,23 +899,31 @@ * @see https://sourceforge.net/apps/trac/bigdata/ticket/343 * @see https://sourceforge.net/apps/trac/bigdata/ticket/440 */ -// final Lock lock = new UnisolatedReadWriteIndex(this).writeLock(); - final Lock lock = UnisolatedReadWriteIndex.getReadWriteLock(this).writeLock(); - lock.lock(); - try { + final Lock lock = writeLock(); + lock.lock(); + try { + /** + * Do not permit checkpoint if the index is in an error state. + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate + * BTree objects if error occurs during eviction </a> + */ + if (error != null) + throw new IllegalStateException(ERROR_ERROR_STATE, error); + //synchronized(this) { + if (/* autoCommit && */needsCheckpoint()) { + + /* + * Flush the btree, write a checkpoint record, and return the + * address of that checkpoint record. The [checkpoint] reference + * is also updated. + */ + + return _writeCheckpoint2(); + + } + //} - if (/* autoCommit && */needsCheckpoint()) { - - /* - * Flush the btree, write a checkpoint record, and return the - * address of that checkpoint record. 
The [checkpoint] reference - * is also updated. - */ - - return _writeCheckpoint2(); - - } - /* * There have not been any writes on this btree or auto-commit is * disabled. @@ -1110,14 +1117,14 @@ @Override final public long getRecordVersion() { - return recordVersion; + return recordVersion; } @Override final public long getMetadataAddr() { - return metadata.getMetadataAddr(); + return metadata.getMetadataAddr(); } @@ -1313,7 +1320,7 @@ @Override public long handleCommit(final long commitTime) { - return writeCheckpoint2().getCheckpointAddr(); + return writeCheckpoint2().getCheckpointAddr(); } Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java 2014-09-11 19:26:33 UTC (rev 8648) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java 2014-09-11 19:56:32 UTC (rev 8649) @@ -23,7 +23,6 @@ */ /* * Created on Nov 17, 2006 - * */ package com.bigdata.btree; @@ -61,88 +60,132 @@ } final AbstractBTree btree = node.btree; + + if (btree.error != null) { + /** + * This occurs if an error was detected against a mutable view of + * the index (the unisolated index view) and the caller has not + * discarded the index and caused it to be reloaded from the most + * recent checkpoint. + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate + * BTree objects if error occurs during eviction </a> + */ + throw new IllegalStateException(AbstractBTree.ERROR_ERROR_STATE, + btree.error); + } + + try { - // Note: This assert can be violated for a read-only B+Tree since there - // is less synchronization. - assert btree.isReadOnly() || btree.ndistinctOnWriteRetentionQueue > 0; + // Note: This assert can be violated for a read-only B+Tree since + // there is less synchronization. + assert btree.isReadOnly() || btree.ndistinctOnWriteRetentionQueue > 0; - btree.ndistinctOnWriteRetentionQueue--; - - if (node.deleted) { + btree.ndistinctOnWriteRetentionQueue--; - /* - * Deleted nodes are ignored as they are evicted from the queue. - */ + if (node.deleted) { - return; + /* + * Deleted nodes are ignored as they are evicted from the queue. + */ - } + return; - // this does not permit transient nodes to be coded. - if (node.dirty && btree.store != null) { -// // this causes transient nodes to be coded on eviction. -// if (node.dirty) { - - if (node.isLeaf()) { + } - /* - * A leaf is written out directly. - */ - - btree.writeNodeOrLeaf(node); + // this does not permit transient nodes to be coded. + if (node.dirty && btree.store != null) { + // // this causes transient nodes to be coded on eviction. + // if (node.dirty) { - } else { + if (node.isLeaf()) { - /* - * A non-leaf node must be written out using a post-order - * traversal so that all dirty children are written through - * before the dirty parent. This is required in order to - * assign persistent identifiers to the dirty children. - */ + /* + * A leaf is written out directly. + */ - btree.writeNodeRecursive(node); + btree.writeNodeOrLeaf(node); - } + } else { - // is a coded data record. - assert node.isCoded(); - - // no longer dirty. - assert !node.dirty; - - if (btree.store != null) { - - // object is persistent (has assigned addr). 
- assert ref.identity != PO.NULL; - - } - - } // isDirty + /* + * A non-leaf node must be written out using a post-order + * traversal so that all dirty children are written through + * before the dirty parent. This is required in order to + * assign persistent identifiers to the dirty children. + */ - // This does not insert into the cache. That is handled by writeNodeOrLeaf. -// if (btree.globalLRU != null) { + btree.writeNodeRecursive(node); + + } + + // is a coded data record. + assert node.isCoded(); + + // no longer dirty. + assert !node.dirty; + + if (btree.store != null) { + + // object is persistent (has assigned addr). + assert ref.identity != PO.NULL; + + } + + } // isDirty + + // This does not insert into the cache. That is handled by writeNodeOrLeaf. +// if (btree.globalLRU != null) { // -// /* -// * Add the INodeData or ILeafData object to the global LRU, NOT the -// * Node or Leaf. -// * -// * Note: The global LRU touch only occurs on eviction from the write -// * retention queue. This is nice because it limits the touches on -// * the global LRU, which could otherwise be a hot spot. We do a -// * touch whether or not the node was persisted since we are likely -// * to return to the node in either case. -// */ +// /* +// * Add the INodeData or ILeafData object to the global LRU, NOT the +// * Node or Leaf. +// * +// * Note: The global LRU touch only occurs on eviction from the write +// * retention queue. This is nice because it limits the touches on +// * the global LRU, which could otherwise be a hot spot. We do a +// * touch whether or not the node was persisted since we are likely +// * to return to the node in either case. +// */ // -// final IAbstractNodeData delegate = node.getDelegate(); +// final IAbstractNodeData delegate = node.getDelegate(); // -// assert delegate != null : node.toString(); +// assert delegate != null : node.toString(); // -// assert delegate.isCoded() : node.toString(); +// assert delegate.isCoded() : node.toString(); // -// btree.globalLRU.add(delegate); +// btree.globalLRU.add(delegate); // -// } +// } + } catch (Throwable e) { + + if (!btree.readOnly) { + + /** + * If the btree is mutable and an eviction fails, then the index + * MUST be discarded. + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> + * Invalidate BTree objects if error occurs during eviction + * </a> + */ + + btree.error = e; + + // Throw as Error. + throw new EvictionError(e); + + } + + // Launder the throwable. + if (e instanceof RuntimeException) + throw (RuntimeException) e; + + throw new RuntimeException(e); + + } + } } Copied: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/EvictionError.java (from rev 8648, branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/EvictionError.java) =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/EvictionError.java (rev 0) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/EvictionError.java 2014-09-11 19:56:32 UTC (rev 8649) @@ -0,0 +1,66 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 2, 2014 + */ +package com.bigdata.btree; + +/** + * Error marks an mutable index as in an inconsistent state arising from an + * exception during eviction of a dirty node or leaf from a mutable index. The + * index MUST be reloaded from the current checkpoint record. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate BTree objects + * if error occurs during eviction </a> + */ +public class EvictionError extends IndexInconsistentError { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public EvictionError() { + } + + public EvictionError(String message) { + super(message); + } + + public EvictionError(Throwable cause) { + super(cause); + } + + public EvictionError(String message, Throwable cause) { + super(message, cause); + } + + public EvictionError(String message, Throwable cause, + boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + +} Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java 2014-09-11 19:26:33 UTC (rev 8648) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java 2014-09-11 19:56:32 UTC (rev 8649) @@ -48,7 +48,7 @@ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > GIST </a> */ public interface ICheckpointProtocol extends ICommitter, ICounterSetAccess, - ISimpleIndexAccess { + ISimpleIndexAccess, IReadWriteLockManager { /** * The value of the record version number that will be assigned to the next Copied: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IReadWriteLockManager.java (from rev 8648, branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IReadWriteLockManager.java) =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IReadWriteLockManager.java (rev 0) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IReadWriteLockManager.java 2014-09-11 19:56:32 UTC (rev 8649) @@ -0,0 +1,82 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.btree; + +import java.util.concurrent.locks.Lock; + +/** + * Interface for managing read/write locks on persistence capable data + * structures. + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child does + * not have persistent identity </a> + */ +public interface IReadWriteLockManager { + + /** + * Return a {@link Lock} that may be used to obtain a shared read lock which + * is used (in the absence of other concurrency control mechanisms) to + * permit concurrent readers on an unisolated index while serializing access + * to that index when a writer must run. This is exposed for processes which + * need to obtain the write lock to coordinate external operations. + * <p> + * Note: If the persistence capable data structure is read-only then the + * returned {@link Lock} is a singleton that ignores all lock requests. This + * is because our read-only persistence capable data structures are already + * thread-safe for concurrent readers. + * + * @return The lock. + */ + Lock readLock(); + + /** + * Return a {@link Lock} that may be used to obtain an exclusive write lock + * which is used (in the absence of other concurrency control mechanisms) to + * serialize all processes accessing an unisolated index when a writer must + * run. This is exposed for processes which need to obtain the write lock to + * coordinate external operations. + * + * @return The lock. + * + * @throws UnsupportedOperationException + * unless the view supports mutation. + */ + Lock writeLock(); + + /** + * Return the #of read-locks held by the current thread for a mutable index + * view. + * + * @return The #of reentrant read locks held by the current thread -or- ZERO + * if the index is read-only (read locks are not tracked for a + * read-only index view). + */ + int getReadLockCount(); + + /** + * Return <code>true</code> iff the data structure is read-only. + */ + boolean isReadOnly(); + +} Copied: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IndexInconsistentError.java (from rev 8648, branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexInconsistentError.java) =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IndexInconsistentError.java (rev 0) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IndexInconsistentError.java 2014-09-11 19:56:32 UTC (rev 8649) @@ -0,0 +1,67 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 2, 2014 + */ +package com.bigdata.btree; + +/** + * Error marks an mutable index as in an inconsistent state. The index MUST be + * reloaded from the current checkpoint record. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate BTree objects + * if error occurs during eviction </a> + */ +public class IndexInconsistentError extends Error { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public IndexInconsistentError() { + } + + public IndexInconsistentError(String message) { + super(message); + } + + public IndexInconsistentError(Throwable cause) { + super(cause); + } + + public IndexInconsistentError(String message, Throwable cause) { + super(message, cause); + } + + public IndexInconsistentError(String message, Throwable cause, + boolean enableSuppression, boolean writableStackTrace) { + + super(message, cause, enableSuppression, writableStackTrace); + + } + +} Copied: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java (from rev 8648, branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java) =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java (rev 0) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java 2014-09-11 19:56:32 UTC (rev 8649) @@ -0,0 +1,528 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 2, 2014 + */ +package com.bigdata.btree; + +import java.util.WeakHashMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import com.bigdata.journal.ICommitter; + +/** + * Base class for managing read/write locks for unisolated {@link ICommitter}s. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child does + * not have persistent identity </a> + */ +public class ReadWriteLockManager implements IReadWriteLockManager { + +// private static final Logger log = Logger.getLogger(ReadWriteLockManager.class); + + /** + * The #of milliseconds that the class will wait for a read or write lock. A + * (wrapped) {@link InterruptedException} will be thrown if this timeout is + * exceeded. 
The default is {@value #LOCK_TIMEOUT_MILLIS} milliseconds. Use + * {@link Long#MAX_VALUE} for no timeout. + * + * TODO There may be no reason to have a timeout when waiting for a lock in + * which case we can get rid of this field. Also, there is no means + * available to configure the timeout (in a similar fashion you can not + * configure the fairness policy for the {@link ReentrantReadWriteLock}). + * <p> + * If we get rid of this field, then the {@link WrappedReadLock} and + * {@link WrappedWriteLock} classes can be simplified to have normal lock + * semantics rather than tryLock() based semantics. + */ + private static final long LOCK_TIMEOUT_MILLIS = Long.MAX_VALUE;// 10000; + + /* + * Note: This creates a hard reference that defeats the weak keys in the + * hash map. + */ +// /** +// * The unisolated persistence capable data structure. +// */ +// final private ICheckpointProtocol committer; + + /** + * True iff the caller's {@link ICheckpointProtocol} object was read-only. + */ + final private boolean readOnly; + + /** + * The {@link Lock} used to permit concurrent readers on an unisolated index + * while serializing access to that index when a writer must run. + */ + final private WrappedWriteLock writeLock; + + /** + * The {@link Lock} ensures that any code path that obtains the read lock + * also maintains the per-thread read-lock counter. + */ + final private Lock readLock; + + /** + * Canonicalizing mapping for the {@link ReadWriteLockManager} objects. + */ + static final private WeakHashMap<ICommitter, ReadWriteLockManager> locks = new WeakHashMap<ICommitter, ReadWriteLockManager>(); + + @Override + public int getReadLockCount() { + + if (readOnly) { + + // No locks are actually taken. + return 0; + + } + + // Return the locks actually held by this thread. + final Integer readLockCounter = ((WrappedReadLock) readLock).threadLockMap + .get(Thread.currentThread().getId()); + + if (readLockCounter == null) { + + // No read locks are held. + return 0; + + } + + return readLockCounter.intValue(); + + } + + @Override + public Lock readLock() { + + return readLock; + + } + + @Override + public Lock writeLock() { + + if (readOnly) + throw new UnsupportedOperationException( + AbstractBTree.ERROR_READ_ONLY); + + return writeLock; + + } + + @Override + public boolean isReadOnly() { + + /* + * Note: This method is grounded out without delegation to avoid + * recursion through the target persistence capable data structure. + */ + + return readOnly; + + } + + /** + * {@link WrappedReadLock} is used to intercept lock/unlock calls to the + * readLock to trigger calls to the logic that tracks the #of reentrant + * read-locks by read and which can be used to identify whether the readlock + * is held by the current thread. + * <p> + * This is tested in the touch() methods for the BTree and HTree classe to + * determine whether the touch should be ignored or trigger potential + * evictions. + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child + * does not have persistent identity </a> + */ + private class WrappedReadLock implements Lock { + + private final Lock delegate; + + /** + * Maintain count of readLocks on by Thread. This is used to avoid having + * read-only operations protected by an {@link ReadWriteLockManager} + * causing evictions of dirty nodes and leaves. 
+ * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child + * does not have persistent identity </a> + */ + private final ConcurrentHashMap<Long, Integer> threadLockMap; + + /** + * Track the #of read locks by thread IFF this is a read/write index + * view. + */ + private void readLockedThread() { + final long thisThreadId = Thread.currentThread().getId(); + final Integer entry = threadLockMap.get(thisThreadId); + final Integer newVal = entry == null ? 1 : 1 + entry.intValue(); + threadLockMap.put(thisThreadId, newVal); + } + + /** + * Track the #of read locks by thread IFF this is a read/write index + * view. + */ + private void readUnlockedThread() { + final long thisThreadId = Thread.currentThread().getId(); + final Integer entry = threadLockMap.get(thisThreadId); + assert entry != null; + if (entry.intValue() == 1) { + threadLockMap.remove(thisThreadId); + } else { + threadLockMap.put(thisThreadId, entry.intValue() - 1); + } + } + + WrappedReadLock(final Lock delegate) { + + if (delegate == null) + throw new IllegalArgumentException(); + + this.delegate = delegate; + + /* + * Configure parallelism default. + * + * Note: This CHM is ONLY used by mutable index views. So what + * matters here is the #of threads that contend for a mutable index + * view. I suspect that this is significantly fewer threads than we + * observe for concurrent read-only index views. Therefore I have + * set the parameters for the map based on the notion that only a + * few threads are contending for the mutable index object in order + * to reduce the heap burden associated with these CHM instances. If + * this map is observed to be hot spot, then we can simply use the + * defaults (initialCapacity = concurrencyLevel = 16). We only have + * this for the mutable index views and there are typically not that + * many instances of those open at the same time. + */ + final int initialCapacity = 4; + final int concurrencyLevel = initialCapacity; + final float loadFactor = .75f; + this.threadLockMap = new ConcurrentHashMap<Long, Integer>( + initialCapacity, loadFactor, concurrencyLevel); + + } + + @Override + public void lock() { + try { + /* + * Note: The original UnisolatedReadWriteLock semantics are + * always those of a tryLock with a default timeout. Make sure + * that we keep this in place! + */ + lockInterruptibly(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + @Override + public void lockInterruptibly() throws InterruptedException { + /* + * Note: The order in which we obtain the real read lock and + * increment (and decrement) the per-thread read lock counter on the + * AbstractBTree is not critical because AbstractBTree.touch() + * relies on the thread both owning the read lock and having the + * per-thread read lock counter incremented for that thread. + * + * Note: The original UnisolatedReadWriteLock semantics are always + * those of a tryLock with a default timeout. Make sure that we keep + * this in place! 
+ */ +// delegate.lock(); + if (!delegate.tryLock(LOCK_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) { + throw new RuntimeException("Timeout"); + } + readLockedThread(); + } + + @Override + public boolean tryLock() { + final boolean ret = delegate.tryLock(); + if (ret) { + readLockedThread(); + } + return ret; + } + + @Override + public boolean tryLock(final long time, final TimeUnit unit) + throws InterruptedException { + final boolean ret = delegate.tryLock(time, unit); + if (ret) { + readLockedThread(); + } + return ret; + } + + @Override + public void unlock() { + /* + * Note: The unlock order does not really matter. See the + * comments on lock() and AbstractBTree.touch(). + */ + delegate.unlock(); + /* + * Do this after the unlock() in case the lock/unlock are not + * correctly paired. + */ + readUnlockedThread(); + } + + @Override + public Condition newCondition() { + return delegate.newCondition(); + } + + } // class WrappedReadLock + + /** + * Wraps the write lock to provide interruptable tryLock() with timeout + * semantics for all write lock acquisitions. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child + * does not have persistent identity </a> + */ + private class WrappedWriteLock implements Lock { + + private final Lock delegate; + + WrappedWriteLock(final Lock delegate) { + + if (delegate == null) + throw new IllegalArgumentException(); + + this.delegate = delegate; + + } + + @Override + public void lock() { + try { + /* + * Note: The original UnisolatedReadWriteLock semantics are + * always those of a tryLock with a default timeout. Make sure + * that we keep this in place! + */ + lockInterruptibly(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + @Override + public void lockInterruptibly() throws InterruptedException { + /* + * Note: The original UnisolatedReadWriteLock semantics are always + * those of a tryLock with a default timeout. Make sure that we keep + * this in place! + */ +// delegate.lock(); + if (!delegate.tryLock(LOCK_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) { + throw new RuntimeException("Timeout"); + } + } + + @Override + public boolean tryLock() { + return delegate.tryLock(); + } + + @Override + public boolean tryLock(final long time, final TimeUnit unit) + throws InterruptedException { + return delegate.tryLock(time, unit); + } + + @Override + public void unlock() { + delegate.unlock(); + } + + @Override + public Condition newCondition() { + return delegate.newCondition(); + } + + } // class WrappedWriteLock + + /** + * Class used for read lock for read-only data structures. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private static class ConcurrentReaderLock implements Lock { + + @Override + public void lock() { + // NOP + } + + @Override + public void lockInterruptibly() throws InterruptedException { + // NOP + } + + @Override + public boolean tryLock() { + return true; + } + + @Override + public boolean tryLock(long time, TimeUnit unit) + throws InterruptedException { + return true; + } + + @Override + public void unlock() { + // NOP + } + + @Override + public Condition newCondition() { + throw new UnsupportedOperationException(); + } + + } + private static final Lock READ_ONLY_LOCK = new ConcurrentReaderLock(); + + /** + * Canonicalizing factory for the {@link ReadWriteLock} for an + * {@link ICommitter}. 
+ * <p> + * Note: This method CAN NOT be exposed since that breaks encapsulation for + * the {@link WrappedReadLock}. + * + * @param index + * The btree. + * @return The lock. + * + * @throws IllegalArgumentException + * if the argument is <code>null</code>. + */ + static public ReadWriteLockManager getLockManager( + final ICheckpointProtocol index) { + + if (index == null) + throw new IllegalArgumentException(); + + synchronized (locks) { + + ReadWriteLockManager lockManager = locks.get(index); + + if (lockManager == null) { + + lockManager = new ReadWriteLockManager(index); + + locks.put(index, lockManager); + + } + + return lockManager; + } + + } + + /** + * Note: ONLY accessed through the canonicalizing pattern! + */ + private ReadWriteLockManager(final ICheckpointProtocol index) { + +// this.committer = index; + + if (this.readOnly = index.isReadOnly()) { + + /* + * Since the index does not allow mutation, wrap with a NOP lock. + * + * Note: Concurrent readers are automatically supported by our + * persistent capable data structures so we return a NOP Lock + * implementation if the data structure is read-only. Also note that + * read-only data structures are (by definition) not mutable so we + * do not need to track the #of reentrant locks held for a read-only + * data structure (per above). + */ + this.readLock = READ_ONLY_LOCK; + + this.writeLock = null; + + } else { + + /* + * Note: fairness is NOT required for the locks. I believe that this + * is supposed to provide better throughput, but that has not been + * tested. Also, this has not been tested with a simple mutex lock + * vs a read-write lock. The use case for which this class was + * originally developed was computing the fix point of a set of + * rules. In that use case, we do a lot of concurrent reading and + * periodically flush the computed solutions onto the relations. It + * is likely that a read-write lock will do well for this situation. + */ + final ReadWriteLock readWriteLock = new ReentrantReadWriteLock( + false/* fair */); + + /** + * If the index allows mutation, then wrap with tryLock() and + * lock-counting semantics. This allows us to test for the #of + * reentrant locks held by the current thread in + * AbstractBTree.touch() and is the primary basis for the fix the + * ticket below. + * + * @see <a href="http://trac.bigdata.com/ticket/855"> + * AssertionError: Child does not have persistent identity </a> + */ + this.readLock = new WrappedReadLock(readWriteLock.readLock()); + + // Wrap with tryLock() semantics. 
+ this.writeLock = new WrappedWriteLock(readWriteLock.writeLock()); + + } + + } + +// @Override +// final public String toString() { +// +// return getClass().getName() + "{committer=" + committer + ",readOnly=" +// + readOnly + "}"; +// +// } + +} Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java 2014-09-11 19:26:33 UTC (rev 8648) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java 2014-09-11 19:56:32 UTC (rev 8649) @@ -23,20 +23,14 @@ */ /* * Created on Jan 10, 2008 - * */ package com.bigdata.btree; import java.util.Iterator; -import java.util.WeakHashMap; -import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.log4j.Logger; - import com.bigdata.bop.cost.BTreeCostModel; import com.bigdata.bop.cost.DiskCostModel; import com.bigdata.bop.cost.ScanCostReport; @@ -48,10 +42,7 @@ import com.bigdata.btree.view.FusedView; import com.bigdata.counters.CounterSet; import com.bigdata.journal.ConcurrencyManager; -import com.bigdata.journal.ICommitter; import com.bigdata.journal.IConcurrencyManager; -import com.bigdata.journal.Journal; -import com.bigdata.journal.TemporaryStore; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.service.Split; @@ -120,124 +111,52 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ -public class UnisolatedReadWriteIndex implements IIndex, ILinearList { +public class UnisolatedReadWriteIndex implements IIndex, ILinearList, + IReadWriteLockManager { - private static final Logger log = Logger.getLogger(UnisolatedReadWriteIndex.class); - /** - * The #of milliseconds that the class will wait for a read or write lock. A - * (wrapped) {@link InterruptedException} will be thrown if this timeout is - * exceeded. The default is {@value #LOCK_TIMEOUT_MILLIS} milliseconds. Use - * {@link Long#MAX_VALUE} for no timeout. - * - * @todo There may be no reason to have a timeout when waiting for a lock in - * which case we can get rid of this field. Also, there is no means - * available to configur... [truncated message content] |
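The central trick in the ReadWriteLockManager copied above is the per-thread reentrancy counter that WrappedReadLock maintains: AbstractBTree.touch() consults it (via getReadLockCount()) to decide whether the calling thread is a pure reader whose touches must not drive evictions. A self-contained sketch of that counting technique follows; the class and method names are illustrative, not the shipped API. (java.util.concurrent's ReentrantReadWriteLock does expose getReadHoldCount(), but that method lives on the lock object itself, whereas readLock() here hands out a wrapped Lock and never exposes the underlying ReentrantReadWriteLock, hence the explicit map.)

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ReadHoldCountSketch {

    private final ReentrantReadWriteLock rwLock =
            new ReentrantReadWriteLock(false/* fair */);

    /** Thread id -> #of reentrant read locks held by that thread. */
    private final ConcurrentHashMap<Long, Integer> readLocks =
            new ConcurrentHashMap<Long, Integer>(4, .75f, 4);

    public void readLock() {
        rwLock.readLock().lock();
        final Long id = Thread.currentThread().getId();
        final Integer n = readLocks.get(id);
        readLocks.put(id, n == null ? 1 : n + 1);
    }

    /** Assumes lock()/unlock() calls are correctly paired, as above. */
    public void readUnlock() {
        rwLock.readLock().unlock();
        final Long id = Thread.currentThread().getId();
        final Integer n = readLocks.get(id);
        if (n.intValue() == 1)
            readLocks.remove(id);
        else
            readLocks.put(id, n - 1);
    }

    /** True iff the calling thread holds at least one read lock. */
    public boolean currentThreadIsReader() {
        return readLocks.containsKey(Thread.currentThread().getId());
    }

    public static void main(final String[] args) {
        final ReadHoldCountSketch s = new ReadHoldCountSketch();
        s.readLock();
        try {
            System.out.println(s.currentThreadIsReader()); // true
        } finally {
            s.readUnlock();
        }
        System.out.println(s.currentThreadIsReader()); // false
    }

}

As in the original, the relative order of the lock acquisition and the counter update is not critical: touch() requires both that the thread owns the read lock and that its counter is non-zero.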
From: <tho...@us...> - 2014-09-11 19:26:47
Revision: 8648 http://sourceforge.net/p/bigdata/code/8648 Author: thompsonbry Date: 2014-09-11 19:26:33 +0000 (Thu, 11 Sep 2014) Log Message: ----------- Two memory leaks. The big one is the AALP test. It does not null out the connection and repo objects after the test. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataArbitraryLengthPathTest.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataArbitraryLengthPathTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataArbitraryLengthPathTest.java 2014-09-10 12:03:27 UTC (rev 8647) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataArbitraryLengthPathTest.java 2014-09-11 19:26:33 UTC (rev 8648) @@ -133,6 +133,8 @@ { con.close(); repo.shutDown(); + con = null; + repo = null; } // @Test Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java 2014-09-10 12:03:27 UTC (rev 8647) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java 2014-09-11 19:26:33 UTC (rev 8648) @@ -642,7 +642,7 @@ */ dataRep = null; - + queryString = null; } /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
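The reason nulling the fields matters is easy to miss: JUnit 3 instantiates one TestCase object per test method and the enclosing TestSuite keeps every instance reachable until the whole run completes, so a connection or repository parked in an instance field survives long after tearDown() returns. A minimal sketch of the pattern (hypothetical base class; field names as in the diff):

import junit.framework.TestCase;
import org.openrdf.repository.Repository;
import org.openrdf.repository.RepositoryConnection;

public abstract class NullingTearDownSketch extends TestCase {

    protected Repository repo;          // created in setUp()
    protected RepositoryConnection con; // created in setUp()

    @Override
    protected void tearDown() throws Exception {
        try {
            if (con != null)
                con.close();
            if (repo != null)
                repo.shutDown();
        } finally {
            // The TestCase instance itself stays reachable until the whole
            // suite finishes, so clear the fields or the objects leak.
            con = null;
            repo = null;
            super.tearDown();
        }
    }

}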
From: <mar...@us...> - 2014-09-10 12:03:30
Revision: 8647 http://sourceforge.net/p/bigdata/code/8647 Author: martyncutcher Date: 2014-09-10 12:03:27 +0000 (Wed, 10 Sep 2014) Log Message: ----------- Ensure expectedResultRepo is shutdown to remove resource leak Modified Paths: -------------- branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/manifest/SPARQLUpdateConformanceTest.java Modified: branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/manifest/SPARQLUpdateConformanceTest.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/manifest/SPARQLUpdateConformanceTest.java 2014-09-09 16:35:15 UTC (rev 8646) +++ branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/manifest/SPARQLUpdateConformanceTest.java 2014-09-10 12:03:27 UTC (rev 8647) @@ -207,6 +207,11 @@ dataRep.shutDown(); dataRep = null; } + + if (expectedResultRepo != null) { + expectedResultRepo.shutDown(); + expectedResultRepo = null; + } } @Override This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
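One refinement worth noting on the committed version: as written, a throw from dataRep.shutDown() would skip the expectedResultRepo cleanup entirely. Chaining the blocks with try/finally gives every resource its chance. This is a sketch of that variant, not the committed code (field names taken from the diff):

import junit.framework.TestCase;
import org.openrdf.repository.Repository;

public abstract class ChainedTearDownSketch extends TestCase {

    protected Repository dataRep;
    protected Repository expectedResultRepo;

    @Override
    protected void tearDown() throws Exception {
        try {
            if (dataRep != null)
                dataRep.shutDown();
        } finally {
            dataRep = null;
            try {
                if (expectedResultRepo != null)
                    expectedResultRepo.shutDown();
            } finally {
                expectedResultRepo = null;
                super.tearDown();
            }
        }
    }

}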
From: <mrp...@us...> - 2014-09-09 16:35:18
Revision: 8646 http://sourceforge.net/p/bigdata/code/8646 Author: mrpersonick Date: 2014-09-09 16:35:15 +0000 (Tue, 09 Sep 2014) Log Message: ----------- updated to support append-only writes as a config property Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataQueryProjection.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialEdge.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialElement.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialVertex.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-09-06 17:41:51 UTC (rev 8645) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-09-09 16:35:15 UTC (rev 8646) @@ -46,7 +46,6 @@ import org.openrdf.query.TupleQuery; import org.openrdf.query.TupleQueryResult; import org.openrdf.query.Update; -import org.openrdf.query.parser.QueryParserUtil; import org.openrdf.repository.RepositoryConnection; import org.openrdf.repository.RepositoryResult; @@ -81,6 +80,11 @@ */ String READ_FROM_WRITE_CONNECTION = BigdataGraph.class.getName() + ".readFromWriteConnection"; + /** + * Use an append model for properties (rather than replace). + */ + String LAX_PROPERTIES = BigdataGraph.class.getName() + ".laxProperties"; + } /** @@ -118,6 +122,11 @@ */ private final boolean readFromWriteConnection; + /** + * If true, use pure append mode (don't check old property values). + */ + private final boolean laxProperties; + public BigdataGraph(final BlueprintsValueFactory factory) { this(factory, new Properties()); } @@ -131,6 +140,8 @@ Options.LAX_EDGES, "false")); this.readFromWriteConnection = Boolean.valueOf(props.getProperty( Options.READ_FROM_WRITE_CONNECTION, "false")); + this.laxProperties = Boolean.valueOf(props.getProperty( + Options.LAX_PROPERTIES, "false")); this.TYPE = factory.getTypeURI(); this.VERTEX = factory.getVertexURI(); @@ -451,8 +462,12 @@ final RepositoryConnection cxn = getWriteConnection(); - // remove the old value - cxn.remove(uri, prop, null); + if (!laxProperties) { + + // remove the old value + cxn.remove(uri, prop, null); + + } // add the new value cxn.add(uri, prop, val); @@ -478,8 +493,12 @@ final RepositoryConnection cxn = getWriteConnection(); - // remove the old value - cxn.remove(uri, prop, null); + if (!laxProperties) { + + // remove the old value + cxn.remove(uri, prop, null); + + } // add the new values for (Literal val : vals) { @@ -723,13 +742,13 @@ final StringBuilder sb = new StringBuilder(); sb.append("construct { ?from ?edge ?to . 
} where {\n"); - sb.append(" ?edge rdf:type bd:Edge .\n"); + sb.append(" ?edge <"+TYPE+"> <"+EDGE+"> .\n"); sb.append(" ?from ?edge ?to .\n"); if (labels != null && labels.length > 0) { if (labels.length == 1) { - sb.append(" ?edge rdfs:label \"").append(labels[0]).append("\" .\n"); + sb.append(" ?edge <"+LABEL+"> \"").append(labels[0]).append("\" .\n"); } else { - sb.append(" ?edge rdfs:label ?label .\n"); + sb.append(" ?edge <"+LABEL+"> ?label .\n"); sb.append(" filter(?label in ("); for (String label : labels) { sb.append("\""+label+"\", "); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java 2014-09-06 17:41:51 UTC (rev 8645) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java 2014-09-09 16:35:15 UTC (rev 8646) @@ -22,18 +22,18 @@ */ package com.bigdata.blueprints; +import java.util.Collection; import java.util.UUID; import org.openrdf.model.Literal; import org.openrdf.model.URI; import org.openrdf.model.impl.StatementImpl; -import org.openrdf.model.vocabulary.RDF; -import org.openrdf.model.vocabulary.RDFS; import org.openrdf.repository.RepositoryConnection; import com.bigdata.rdf.changesets.IChangeLog; import com.bigdata.rdf.changesets.IChangeRecord; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; +import com.tinkerpop.blueprints.Direction; import com.tinkerpop.blueprints.Edge; import com.tinkerpop.blueprints.GraphQuery; import com.tinkerpop.blueprints.TransactionalGraph; @@ -161,17 +161,26 @@ } /** - * Set a property without removing the old value first. + * Set a single-value property on an edge or vertex (remove the old + * value first). + * + * @see {@link BigdataElement} */ @Override - public void setProperty(final URI s, final URI p, final Literal o) { + public void setProperty(final URI uri, final URI prop, final Literal val) { try { + + final RepositoryConnection cxn = getWriteConnection(); -// cxn().remove(s, p, null); +// // remove the old value +// cxn.remove(uri, prop, null); - getWriteConnection().add(s, p, o); + // add the new value + cxn.add(uri, prop, val); + } catch (RuntimeException e) { + throw e; } catch (Exception e) { throw new RuntimeException(e); } @@ -179,10 +188,39 @@ } /** - * Add a vertex without consistency checking (does not check for a duplicate - * identifier). + * Set a multi-value property on an edge or vertex (remove the old + * values first). + * + * @see {@link BigdataElement} */ @Override + public void setProperty(final URI uri, final URI prop, + final Collection<Literal> vals) { + + try { + + final RepositoryConnection cxn = getWriteConnection(); + +// // remove the old value +// cxn.remove(uri, prop, null); + + // add the new values + for (Literal val : vals) { + cxn.add(uri, prop, val); + } + + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + + } + + /** + * Add a vertex. + */ + @Override public Vertex addVertex(final Object key) { try { @@ -192,23 +230,25 @@ final URI uri = factory.toVertexURI(vid); -// if (cxn().hasStatement(vertexURI, RDF.TYPE, VERTEX, false)) { + // do we need to check this? 
+// if (cxn().hasStatement(vertexURI, TYPE, VERTEX, false)) { // throw new IllegalArgumentException("vertex " + vid + " already exists"); // } - getWriteConnection().add(uri, RDF.TYPE, VERTEX); + getWriteConnection().add(uri, TYPE, VERTEX); return new BigdataVertex(uri, this); - } catch (Exception ex) { - throw new RuntimeException(ex); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); } } /** - * Add an edge without consistency checking (does not check for a duplicate - * identifier). + * Add an edge. */ @Override public Edge addEdge(final Object key, final Vertex from, final Vertex to, @@ -218,40 +258,44 @@ throw new IllegalArgumentException(); } - final String eid = key != null ? key.toString() : UUID.randomUUID().toString(); - - final URI edgeURI = factory.toEdgeURI(eid); - -// if (key != null) { +// if (key != null && !laxEdges) { // // final Edge edge = getEdge(key); // // if (edge != null) { // if (!(edge.getVertex(Direction.OUT).equals(from) && -// (edge.getVertex(Direction.OUT).equals(to)))) { +// (edge.getVertex(Direction.IN).equals(to)))) { // throw new IllegalArgumentException("edge already exists: " + key); // } // } // // } + final String eid = key != null ? key.toString() : UUID.randomUUID().toString(); + + final URI edgeURI = factory.toEdgeURI(eid); + try { -// if (cxn().hasStatement(edgeURI, RDF.TYPE, EDGE, false)) { + // do we need to check this? +// if (cxn().hasStatement(edgeURI, TYPE, EDGE, false)) { // throw new IllegalArgumentException("edge " + eid + " already exists"); // } final URI fromURI = factory.toVertexURI(from.getId().toString()); final URI toURI = factory.toVertexURI(to.getId().toString()); - getWriteConnection().add(fromURI, edgeURI, toURI); - getWriteConnection().add(edgeURI, RDF.TYPE, EDGE); - getWriteConnection().add(edgeURI, RDFS.LABEL, factory.toLiteral(label)); + final RepositoryConnection cxn = getWriteConnection(); + cxn.add(fromURI, edgeURI, toURI); + cxn.add(edgeURI, TYPE, EDGE); + cxn.add(edgeURI, LABEL, factory.toLiteral(label)); return new BigdataEdge(new StatementImpl(fromURI, edgeURI, toURI), this); - } catch (Exception ex) { - throw new RuntimeException(ex); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataQueryProjection.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataQueryProjection.java 2014-09-06 17:41:51 UTC (rev 8645) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataQueryProjection.java 2014-09-09 16:35:15 UTC (rev 8646) @@ -144,6 +144,10 @@ isEdge = true; final PartialEdge edge = elements.edges.get(stmt); + + if (log.isInfoEnabled()) { + log.info("copying properties from: " + uri + " to: " + stmt); + } edge.copyProperties(element); @@ -207,6 +211,15 @@ // } // // } + + if (log.isInfoEnabled()) { + for (PartialVertex v : elements.vertices.values()) { + log.info(v); + } + for (PartialEdge e : elements.edges.values()) { + log.info(e); + } + } return new BigdataGraphlet( elements.vertices.values(), elements.edges.values()); @@ -236,9 +249,9 @@ private void handleProperty(final PartialGraph elements, final Statement stmt) { -// if (log.isInfoEnabled()) { -// log.info(stmt); -// } + if (log.isInfoEnabled()) { + log.info(stmt); + } final URI uri = (URI) stmt.getSubject(); @@ 
-350,159 +363,4 @@ } - private static class PartialElement implements Element { - - private final String id; - - private final Map<String, Object> properties = - new LinkedHashMap<String, Object>(); - - public PartialElement(final String id) { - this.id = id; - } - - @Override - public Object getId() { - return id; - } - - @Override - @SuppressWarnings("unchecked") - public Object getProperty(final String name) { - return properties.get(name); - } - - @Override - public Set<String> getPropertyKeys() { - return properties.keySet(); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - @Override - @SuppressWarnings("unchecked") - public Object removeProperty(final String key) { - return properties.remove(key); - } - - @Override - public void setProperty(final String key, final Object value) { - - /* - * Gracefully turn a single value property into a - * multi-valued property. - */ - if (properties.containsKey(key)) { - - final Object o = properties.get(key); - - if (o instanceof List) { - - @SuppressWarnings("unchecked") - final List<Object> list = (List<Object>) o; - list.add(value); - - } else { - - final List<Object> list = new LinkedList<Object>(); - list.add(o); - list.add(value); - - properties.put(key, list); - - } - - } else { - - properties.put(key, value); - - } - - } - - public void copyProperties(final PartialElement element) { - properties.putAll(element.properties); - } - - } - - private static class PartialVertex extends PartialElement implements Vertex { - - public PartialVertex(final String id) { - super(id); - } - - @Override - public Edge addEdge(String arg0, Vertex arg1) { - throw new UnsupportedOperationException(); - } - - @Override - public Iterable<Edge> getEdges(Direction arg0, String... arg1) { - throw new UnsupportedOperationException(); - } - - @Override - public Iterable<Vertex> getVertices(Direction arg0, String... arg1) { - throw new UnsupportedOperationException(); - } - - @Override - public VertexQuery query() { - throw new UnsupportedOperationException(); - } - - } - - private static class PartialEdge extends PartialElement implements Edge { - - private String label; - - private Vertex from; - - private Vertex to; - - public PartialEdge(final String id) { - super(id); - } - - @Override - public String getLabel() { - return label; - } - - @Override - public Vertex getVertex(final Direction dir) throws IllegalArgumentException { - - if (dir == Direction.OUT) { - return from; - } else if (dir == Direction.IN) { - return to; - } - - throw new IllegalArgumentException(); - - } - - private boolean isComplete() { - return label != null && from != null && to != null; - } - - private void setLabel(final String label) { - this.label = label; - } - - private void setFrom(final Vertex v) { - this.from = v; - } - - private void setTo(final Vertex v) { - this.to = v; - } - - } - } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialEdge.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialEdge.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialEdge.java 2014-09-09 16:35:15 UTC (rev 8646) @@ -0,0 +1,82 @@ +/** +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... 
+ +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.blueprints; + +import com.tinkerpop.blueprints.Direction; +import com.tinkerpop.blueprints.Edge; +import com.tinkerpop.blueprints.Vertex; + +public class PartialEdge extends PartialElement implements Edge { + + private String label; + + private Vertex from; + + private Vertex to; + + public PartialEdge(final String id) { + super(id); + } + + @Override + public String getLabel() { + return label; + } + + @Override + public Vertex getVertex(final Direction dir) throws IllegalArgumentException { + + if (dir == Direction.OUT) { + return from; + } else if (dir == Direction.IN) { + return to; + } + + throw new IllegalArgumentException(); + + } + + public void setLabel(final String label) { + this.label = label; + } + + public void setFrom(final Vertex v) { + this.from = v; + } + + public void setTo(final Vertex v) { + this.to = v; + } + + public String toString() { + final StringBuilder sb = new StringBuilder(); + sb.append("id: " + super.getId()); + sb.append(", from: " + from.getId()); + sb.append(", to: " + to.getId()); + sb.append(", label: " + label); + sb.append(", props: "); + super.appendProps(sb); + return sb.toString(); + } + +} \ No newline at end of file Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialEdge.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialElement.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialElement.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialElement.java 2014-09-09 16:35:15 UTC (rev 8646) @@ -0,0 +1,124 @@ +/** +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.blueprints; + +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.tinkerpop.blueprints.Element; + +public class PartialElement implements Element { + + private final String id; + + private final Map<String, Object> properties = + new LinkedHashMap<String, Object>(); + + public PartialElement(final String id) { + this.id = id; + } + + @Override + public Object getId() { + return id; + } + + @Override + @SuppressWarnings("unchecked") + public Object getProperty(final String name) { + return properties.get(name); + } + + @Override + public Set<String> getPropertyKeys() { + return properties.keySet(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + @Override + @SuppressWarnings("unchecked") + public Object removeProperty(final String key) { + return properties.remove(key); + } + + @Override + public void setProperty(final String key, final Object value) { + + /* + * Gracefully turn a single value property into a + * multi-valued property. + */ + if (properties.containsKey(key)) { + + final Object o = properties.get(key); + + if (o instanceof List) { + + @SuppressWarnings("unchecked") + final List<Object> list = (List<Object>) o; + list.add(value); + + } else { + + final List<Object> list = new LinkedList<Object>(); + list.add(o); + list.add(value); + + properties.put(key, list); + + } + + } else { + + properties.put(key, value); + + } + + } + + public void copyProperties(final PartialElement element) { + properties.putAll(element.properties); + } + + public String toString() { + final StringBuilder sb = new StringBuilder(); + sb.append("id: " + id); + sb.append(", props: "); + appendProps(sb); + return sb.toString(); + } + + protected void appendProps(final StringBuilder sb) { + for (Map.Entry<String, Object> prop : properties.entrySet()) { + sb.append(prop.getKey()).append("=").append(prop.getValue()); + } + } + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialElement.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialVertex.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialVertex.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialVertex.java 2014-09-09 16:35:15 UTC (rev 8646) @@ -0,0 +1,56 @@ +/** +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.blueprints; + +import com.tinkerpop.blueprints.Direction; +import com.tinkerpop.blueprints.Edge; +import com.tinkerpop.blueprints.Vertex; +import com.tinkerpop.blueprints.VertexQuery; + +public class PartialVertex extends PartialElement implements Vertex { + + public PartialVertex(final String id) { + super(id); + } + + @Override + public Edge addEdge(String arg0, Vertex arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public Iterable<Edge> getEdges(Direction arg0, String... arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public Iterable<Vertex> getVertices(Direction arg0, String... arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public VertexQuery query() { + throw new UnsupportedOperationException(); + } + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/PartialVertex.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
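The new option trades replace semantics for pure appends: with LAX_PROPERTIES set, setProperty() skips the cxn.remove(uri, prop, null) pass and only adds, so repeated writes to the same key accumulate multi-valued properties, and callers that still want replacement must delete the old values themselves. A sketch of enabling it, assuming Options is the nested interface the diff implies:

import java.util.Properties;

import com.bigdata.blueprints.BigdataGraph;

public class LaxPropertiesConfigSketch {

    /** Properties that put a BigdataGraph into append-only property mode. */
    public static Properties laxProperties() {
        final Properties props = new Properties();
        // The key resolves to "com.bigdata.blueprints.BigdataGraph.laxProperties".
        props.setProperty(BigdataGraph.Options.LAX_PROPERTIES, "true");
        return props;
    }

}

Pass the resulting Properties to whichever concrete BigdataGraph implementation is in use; the construction path itself is outside this diff.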
From: <tho...@us...> - 2014-09-06 17:41:59
Revision: 8645 http://sourceforge.net/p/bigdata/code/8645 Author: thompsonbry Date: 2014-09-06 17:41:51 +0000 (Sat, 06 Sep 2014) Log Message: ----------- Further reduction of logging in CI. Modified Paths: -------------- branches/SESAME_2_7/bigdata/src/resources/logging/log4j-dev.properties Modified: branches/SESAME_2_7/bigdata/src/resources/logging/log4j-dev.properties =================================================================== --- branches/SESAME_2_7/bigdata/src/resources/logging/log4j-dev.properties 2014-09-06 17:40:34 UTC (rev 8644) +++ branches/SESAME_2_7/bigdata/src/resources/logging/log4j-dev.properties 2014-09-06 17:41:51 UTC (rev 8645) @@ -174,7 +174,7 @@ #log4j.logger.com.bigdata.bop.engine.AbstractRunningQuery=ALL #log4j.logger.com.bigdata.bop.engine.ChunkedRunningQuery=ALL #log4j.logger.com.bigdata.bop.engine.RunState=INFO -log4j.logger.com.bigdata.bop.joinGraph.rto.JGraph=INFO +#log4j.logger.com.bigdata.bop.joinGraph.rto.JGraph=INFO #log4j.logger.com.bigdata.bop.joinGraph.rto.Vertex=ALL #log4j.logger.com.bigdata.bop.joinGraph.rto.Path=ALL #log4j.logger.com.bigdata.bop.engine.RunningQuery$ChunkTask=ALL @@ -261,9 +261,9 @@ #log4j.logger.com.bigdata.relation.rule.eval.NestedSubqueryWithJoinThreadsTask=DEBUG #log4j.logger.com.bigdata.rdf.sail.TestNestedUnions=ALL -log4j.logger.com.bigdata.util.httpd.NanoHTTPD=DEBUG -log4j.logger.com.bigdata.util.httpd.AbstractHTTPD=DEBUG -log4j.logger.com.bigdata.rdf.sail.webapp.NanoSparqlServer=ALL +#log4j.logger.com.bigdata.util.httpd.NanoHTTPD=DEBUG +#log4j.logger.com.bigdata.util.httpd.AbstractHTTPD=DEBUG +#log4j.logger.com.bigdata.rdf.sail.webapp.NanoSparqlServer=ALL # Lehigh benchmark integration log4j.logger.edu.lehigh.swat.bench.ubt.bigdata=INFO @@ -282,20 +282,20 @@ # Normal data loader (single threaded). log4j.logger.com.bigdata.rdf.store.DataLoader=INFO -log4j.logger.com.bigdata.ha=INFO -log4j.logger.com.bigdata.txLog=INFO -log4j.logger.com.bigdata.haLog=INFO +#log4j.logger.com.bigdata.ha=INFO +#log4j.logger.com.bigdata.txLog=INFO +#log4j.logger.com.bigdata.haLog=INFO #log4j.logger.com.bigdata.rwstore=ALL -log4j.logger.com.bigdata.journal=INFO +#log4j.logger.com.bigdata.journal=INFO #log4j.logger.com.bigdata.journal.AbstractBufferStrategy=ALL -log4j.logger.com.bigdata.journal.jini.ha=ALL +#log4j.logger.com.bigdata.journal.jini.ha=ALL #log4j.logger.com.bigdata.service.jini.lookup=ALL log4j.logger.com.bigdata.quorum=ALL log4j.logger.com.bigdata.quorum.zk=ALL #log4j.logger.com.bigdata.quorum.quorumState=ALL,destPlain #log4j.logger.com.bigdata.io.writecache=ALL -log4j.logger.benchmark.bigdata.TestBSBM=INFO +#log4j.logger.benchmark.bigdata.TestBSBM=INFO # Test suite logger. #log4j.logger.junit=INFO This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
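Why commenting the lines out is enough: a log4j 1.x logger with no explicit level inherits the effective level of its nearest configured ancestor, ultimately the root logger, so the former INFO/DEBUG loggers above now pick up whatever the root (or a com.bigdata parent) is set to. A small sketch of that inheritance rule (WARN for the root is illustrative; the actual root line of log4j-dev.properties is not shown in this diff):

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class LevelInheritanceSketch {

    public static void main(final String[] args) {
        Logger.getRootLogger().setLevel(Level.WARN);
        // No explicit level for this logger (its properties line is now
        // commented out), so it inherits WARN from the root:
        final Logger child =
                Logger.getLogger("com.bigdata.bop.joinGraph.rto.JGraph");
        System.out.println(child.getEffectiveLevel()); // WARN
        System.out.println(child.isInfoEnabled());     // false
    }

}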
From: <tho...@us...> - 2014-09-06 17:40:42
Revision: 8644 http://sourceforge.net/p/bigdata/code/8644 Author: thompsonbry Date: 2014-09-06 17:40:34 +0000 (Sat, 06 Sep 2014) Log Message: ----------- enabling thread count checks in CI. Modified Paths: -------------- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxyTestCase.java Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java 2014-09-06 17:39:26 UTC (rev 8643) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java 2014-09-06 17:40:34 UTC (rev 8644) @@ -37,7 +37,6 @@ import junit.extensions.proxy.IProxyTest; import junit.framework.Test; -import org.apache.log4j.Logger; import org.openrdf.model.Resource; import org.openrdf.query.Binding; import org.openrdf.query.BindingSet; @@ -157,18 +156,77 @@ * proxy is created for each test, while one instance of the delegate serves * an entire suite of tests.) */ + + private int startupActiveThreads = 0; + @Override protected void setUp() throws Exception { - getOurDelegate().setUp(this); + + startupActiveThreads = Thread.currentThread().getThreadGroup().activeCount(); + + getOurDelegate().setUp(this); + } + private static boolean s_checkThreads = true; + + @Override protected void tearDown() throws Exception { + getOurDelegate().tearDown(this); + + if (s_checkThreads) { + + final ThreadGroup grp = Thread.currentThread().getThreadGroup(); + final int tearDownActiveThreads = grp.activeCount(); + if (startupActiveThreads != tearDownActiveThreads) { + final Thread[] threads = new Thread[tearDownActiveThreads]; + grp.enumerate(threads); + final StringBuilder info = new StringBuilder(); + boolean first = true; + for (Thread t : threads) { + if (t == null) + continue; + if (!first) + info.append(','); + info.append("[" + t.getName() + "]"); + first = false; + } + + final String failMessage = "Threads left active after task" + + ": test=" + + getName()// + + ", delegate=" + getOurDelegate().getClass().getName() + + ", startupCount=" + startupActiveThreads + + ", teardownCount=" + tearDownActiveThreads + + ", thisThread=" + Thread.currentThread().getName() + + ", threads: " + info; + + if (grp.activeCount() != startupActiveThreads) + log.error(failMessage); + + /* + * Wait up to 2 seconds for threads to die off so the next test + * will run more cleanly. + */ + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (grp.activeCount() != startupActiveThreads) + break; + } + + } + + } + + super.tearDown(); + } /** * The properties as configured by the delegate. */ + @Override public Properties getProperties() { return getOurDelegate().getProperties(); } Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxyTestCase.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxyTestCase.java 2014-09-06 17:39:26 UTC (rev 8643) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxyTestCase.java 2014-09-06 17:40:34 UTC (rev 8644) @@ -155,68 +155,73 @@ * an entire suite of tests.) 
*/ -// private int startupActiveThreads = 0; + private int startupActiveThreads = 0; + @Override public void setUp() throws Exception { -// startupActiveThreads = Thread.currentThread().getThreadGroup().activeCount(); + startupActiveThreads = Thread.currentThread().getThreadGroup().activeCount(); getOurDelegate().setUp(this); } -// private static boolean s_checkThreads = true; - + private static boolean s_checkThreads = true; + + @Override public void tearDown() throws Exception { getOurDelegate().tearDown(this); -// if (s_checkThreads) { -// -// final ThreadGroup grp = Thread.currentThread().getThreadGroup(); -// final int tearDownActiveThreads = grp.activeCount(); -// if (startupActiveThreads != tearDownActiveThreads) { -// final Thread[] threads = new Thread[tearDownActiveThreads]; -// grp.enumerate(threads); -// final StringBuilder info = new StringBuilder(); -// boolean first = true; -// for (Thread t : threads) { -// if (t == null) -// continue; -// if(!first) -// info.append(','); -// info.append("[" + t.getName() + "]"); -// first = false; -// } -// -// final String failMessage = "Threads left active after task" -// +": test=" + getName()// -// + ", delegate="+getOurDelegate().getClass().getName() -// + ", startupCount=" + startupActiveThreads -// + ", teardownCount=" + tearDownActiveThreads -// + ", thisThread="+Thread.currentThread().getName() -// + ", threads: " + info; -// -// if (grp.activeCount() != startupActiveThreads) -// log.error(failMessage); -// -// /* -// * Wait up to 2 seconds for threads to die off so the next test -// * will run more cleanly. -// */ -// for (int i = 0; i < 20; i++) { -// Thread.sleep(100); -// if (grp.activeCount() != startupActiveThreads) -// break; -// } -// -// } -// -// } - - super.tearDown(); + if (s_checkThreads) { + + final ThreadGroup grp = Thread.currentThread().getThreadGroup(); + final int tearDownActiveThreads = grp.activeCount(); + if (startupActiveThreads != tearDownActiveThreads) { + final Thread[] threads = new Thread[tearDownActiveThreads]; + grp.enumerate(threads); + final StringBuilder info = new StringBuilder(); + boolean first = true; + for (Thread t : threads) { + if (t == null) + continue; + if (!first) + info.append(','); + info.append("[" + t.getName() + "]"); + first = false; + } + + final String failMessage = "Threads left active after task" + + ": test=" + + getName()// + + ", delegate=" + getOurDelegate().getClass().getName() + + ", startupCount=" + startupActiveThreads + + ", teardownCount=" + tearDownActiveThreads + + ", thisThread=" + Thread.currentThread().getName() + + ", threads: " + info; + + if (grp.activeCount() != startupActiveThreads) + log.error(failMessage); + + /* + * Wait up to 2 seconds for threads to die off so the next test + * will run more cleanly. + */ + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (grp.activeCount() != startupActiveThreads) + break; + } + + } + + } + + super.tearDown(); + } + @Override public Properties getProperties() { return getOurDelegate().getProperties(); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
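Condensed, the leak detector those two proxy classes now enable works like the sketch below: snapshot ThreadGroup.activeCount() in setUp(), compare in tearDown(), name the stragglers, and give them up to two seconds to exit so the next test starts clean. Note that activeCount() is documented as an estimate and enumerate() may fill fewer slots than it reports, hence the null checks.

public abstract class ThreadLeakCheckSketch extends junit.framework.TestCase {

    private int startupActiveThreads;

    @Override
    protected void setUp() throws Exception {
        startupActiveThreads =
                Thread.currentThread().getThreadGroup().activeCount();
        super.setUp();
    }

    @Override
    protected void tearDown() throws Exception {
        final ThreadGroup grp = Thread.currentThread().getThreadGroup();
        if (grp.activeCount() != startupActiveThreads) {
            final Thread[] threads = new Thread[grp.activeCount()];
            grp.enumerate(threads);
            final StringBuilder info = new StringBuilder();
            for (Thread t : threads) {
                if (t != null)
                    info.append('[').append(t.getName()).append(']');
            }
            System.err.println("Threads left active after " + getName()
                    + ": " + info);
            // Wait up to 2 seconds for threads to die off so the next
            // test will run more cleanly.
            for (int i = 0; i < 20
                    && grp.activeCount() != startupActiveThreads; i++) {
                Thread.sleep(100);
            }
        }
        super.tearDown();
    }

}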
From: <tho...@us...> - 2014-09-06 17:39:29
Revision: 8643 http://sourceforge.net/p/bigdata/code/8643 Author: thompsonbry Date: 2014-09-06 17:39:26 +0000 (Sat, 06 Sep 2014) Log Message: ----------- code cleanup Modified Paths: -------------- branches/SESAME_2_7/bigdata/src/test/com/bigdata/journal/ProxyTestCase.java Modified: branches/SESAME_2_7/bigdata/src/test/com/bigdata/journal/ProxyTestCase.java =================================================================== --- branches/SESAME_2_7/bigdata/src/test/com/bigdata/journal/ProxyTestCase.java 2014-09-06 16:05:56 UTC (rev 8642) +++ branches/SESAME_2_7/bigdata/src/test/com/bigdata/journal/ProxyTestCase.java 2014-09-06 17:39:26 UTC (rev 8643) @@ -144,7 +144,8 @@ */ private int startupActiveThreads = 0; - + + @Override public void setUp() throws Exception { startupActiveThreads = Thread.currentThread().getThreadGroup().activeCount(); @@ -155,58 +156,62 @@ private static boolean s_checkThreads = true; + @Override public void tearDown() throws Exception { getOurDelegate().tearDown(this); - + if (s_checkThreads) { - final ThreadGroup grp = Thread.currentThread().getThreadGroup(); - final int tearDownActiveThreads = grp.activeCount(); - int nremaining = 0; - if (startupActiveThreads != tearDownActiveThreads) { - final Thread[] threads = new Thread[tearDownActiveThreads]; - grp.enumerate(threads); - final StringBuilder info = new StringBuilder(); - boolean first = true; + final ThreadGroup grp = Thread.currentThread().getThreadGroup(); + final int tearDownActiveThreads = grp.activeCount(); + int nremaining = 0; + if (startupActiveThreads != tearDownActiveThreads) { + final Thread[] threads = new Thread[tearDownActiveThreads]; + grp.enumerate(threads); + final StringBuilder info = new StringBuilder(); + boolean first = true; for (Thread t : threads) { if (t == null) continue; - if(!first) - info.append(','); + if (!first) + info.append(','); info.append("[" + t.getName() + "]"); first = false; nremaining++; - } - - final String failMessage = "Threads left active after task" - +": test=" + getName()// - + ", delegate="+getOurDelegate().getClass().getName() - + ", startupCount=" + startupActiveThreads - + ", teardownCount=" + nremaining - + ", thisThread="+Thread.currentThread().getName() - + ", threads: " + info; - + } + + final String failMessage = "Threads left active after task" + + ": test=" + + getName()// + + ", delegate=" + getOurDelegate().getClass().getName() + + ", startupCount=" + startupActiveThreads + + ", teardownCount=" + nremaining + ", thisThread=" + + Thread.currentThread().getName() + ", threads: " + + info; + if (nremaining > startupActiveThreads) - log.error(failMessage); + log.error(failMessage); /* * Wait up to 2 seconds for threads to die off so the next test * will run more cleanly. */ - for (int i = 0; i < 20; i++) { - Thread.sleep(100); - if (grp.activeCount() != startupActiveThreads) - break; - } + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (grp.activeCount() != startupActiveThreads) + break; + } - } - + } + } - - super.tearDown(); + + super.tearDown(); + } + @Override public Properties getProperties() { return getOurDelegate().getProperties(); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-09-06 16:05:59
Revision: 8642 http://sourceforge.net/p/bigdata/code/8642 Author: thompsonbry Date: 2014-09-06 16:05:56 +0000 (Sat, 06 Sep 2014) Log Message: ----------- Reduced log levels. Modified Paths: -------------- branches/SESAME_2_7/bigdata/src/resources/logging/log4j-dev.properties Modified: branches/SESAME_2_7/bigdata/src/resources/logging/log4j-dev.properties =================================================================== --- branches/SESAME_2_7/bigdata/src/resources/logging/log4j-dev.properties 2014-09-06 16:04:00 UTC (rev 8641) +++ branches/SESAME_2_7/bigdata/src/resources/logging/log4j-dev.properties 2014-09-06 16:05:56 UTC (rev 8642) @@ -298,9 +298,9 @@ log4j.logger.benchmark.bigdata.TestBSBM=INFO # Test suite logger. -log4j.logger.junit=INFO +#log4j.logger.junit=INFO #log4j.logger.junit=DEBUG -log4j.logger.com.bigdata.btree.AbstractBTreeTestCase=INFO +#log4j.logger.com.bigdata.btree.AbstractBTreeTestCase=INFO #log4j.logger.com.bigdata.rdf.sail.contrib.TestMillisecondPrecisionForInlineDateTimes=ALL This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-09-06 16:04:06
Revision: 8641 http://sourceforge.net/p/bigdata/code/8641 Author: thompsonbry Date: 2014-09-06 16:04:00 +0000 (Sat, 06 Sep 2014) Log Message: ----------- reduced log level. Modified Paths: -------------- branches/SESAME_2_7/bigdata/src/java/com/bigdata/service/DataService.java Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/service/DataService.java =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/service/DataService.java 2014-09-04 11:04:48 UTC (rev 8640) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/service/DataService.java 2014-09-06 16:04:00 UTC (rev 8641) @@ -558,9 +558,9 @@ * report that it is closed. At that point the data service can * not start and will shutdown. */ + if(log.isInfoEnabled()) + log.info("Store manager not open - will shutdown."); - log.fatal("Store manager not open - will shutdown."); - // shutdown the data service. dataService.shutdownNow(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
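Two independent improvements ride on that one-line change: the severity drops from FATAL to INFO, since a store manager that fails to open is an expected shutdown path rather than a crash, and the call gains the standard log4j guard. For a constant string the guard saves only the call itself, but the idiom pays off whenever the message is assembled from state, as this sketch shows (the state object is a stand-in):

import org.apache.log4j.Logger;

public class GuardedLoggingSketch {

    private static final Logger log =
            Logger.getLogger(GuardedLoggingSketch.class);

    public static void main(final String[] args) {
        // Guarded, as in the diff: skipped entirely when INFO is disabled.
        if (log.isInfoEnabled())
            log.info("Store manager not open - will shutdown.");

        // The guard matters most when building the message is itself
        // costly; the concatenation below runs only if DEBUG is enabled.
        final Object state = new java.util.Date(); // stand-in for real state
        if (log.isDebugEnabled())
            log.debug("store state=" + state);
    }

}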
From: <tho...@us...> - 2014-09-04 11:04:56
Revision: 8640 http://sourceforge.net/p/bigdata/code/8640 Author: thompsonbry Date: 2014-09-04 11:04:48 +0000 (Thu, 04 Sep 2014) Log Message: ----------- Removed hard reference that was pinning the keys of the weak hash map. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java 2014-09-03 15:12:24 UTC (rev 8639) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java 2014-09-04 11:04:48 UTC (rev 8640) @@ -65,10 +65,14 @@ */ private static final long LOCK_TIMEOUT_MILLIS = Long.MAX_VALUE;// 10000; - /** - * The unisolated persistence capable data structure. + /* + * Note: This creates a hard reference that defeats the weak keys in the + * hash map. */ - final private ICheckpointProtocol committer; +// /** +// * The unisolated persistence capable data structure. +// */ +// final private ICheckpointProtocol committer; /** * True iff the caller's {@link ICheckpointProtocol} object was read-only. @@ -461,7 +465,7 @@ */ private ReadWriteLockManager(final ICheckpointProtocol index) { - this.committer = index; +// this.committer = index; if (this.readOnly = index.isReadOnly()) { @@ -513,12 +517,12 @@ } - @Override - final public String toString() { +// @Override +// final public String toString() { +// +// return getClass().getName() + "{committer=" + committer + ",readOnly=" +// + readOnly + "}"; +// +// } - return getClass().getName() + "{committer=" + committer + ",readOnly=" - + readOnly + "}"; - - } - } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
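The comment added in that diff names a whole bug class: a WeakHashMap entry is only reclaimed once its key becomes weakly reachable, and a value object that holds a strong reference back to its own key (as the manager's committer field did, since it referenced the very index used as the map key) keeps the key strongly reachable through the map's own value table, so nothing is ever evicted. A small demonstration, not bigdata code; GC timing makes the printed sizes typical rather than guaranteed:

import java.util.Map;
import java.util.WeakHashMap;

public class WeakKeyLeakDemo {

    /** A value that pins its own key, like the old committer field. */
    static class SelfPinningValue {
        final Object key;
        SelfPinningValue(final Object key) { this.key = key; }
    }

    public static void main(final String[] args) throws InterruptedException {
        final Map<Object, Object> leaky = new WeakHashMap<Object, Object>();
        final Map<Object, Object> ok = new WeakHashMap<Object, Object>();

        Object k1 = new Object();
        Object k2 = new Object();
        leaky.put(k1, new SelfPinningValue(k1)); // value -> key: pinned
        ok.put(k2, new Object());                // no back reference

        k1 = null; // drop the only external strong references
        k2 = null;
        System.gc();
        Thread.sleep(100); // let the reference queue drain

        // Typically prints "leaky=1 ok=0": the self-reference defeats the
        // weak keys, which is exactly what removing the field fixes.
        System.out.println("leaky=" + leaky.size() + " ok=" + ok.size());
    }

}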
From: <tho...@us...> - 2014-09-03 15:12:40
Revision: 8639 http://sourceforge.net/p/bigdata/code/8639 Author: thompsonbry Date: 2014-09-03 15:12:24 +0000 (Wed, 03 Sep 2014) Log Message: ----------- Committing to CI a fix for #855 (reads must not drive evictions for the unisolated view of the BTree and HTree) and #1005 (Unisolated BTree and HTree should be marked as invalid after error during eviction of a dirty leaf or node). See the tickets for more detail about this change set. I have run much of the total test suite locally, including the HA test suite. We are testing for performance regressions with this change set. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/BTree.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/AbstractHTree.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/DefaultEvictionListener.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/HTree.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractCommitTimeIndex.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/ICommitter.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/EventReceiver.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/LoadBalancerService.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/stream/Stream.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/EvictionError.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IReadWriteLockManager.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexInconsistentError.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java 2014-09-02 20:06:22 UTC (rev 8638) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractBTree.java 2014-09-03 15:12:24 UTC (rev 8639) @@ -24,6 +24,7 @@ /* * Created on Dec 19, 2006 * + * RESYNC */ package com.bigdata.btree; @@ -40,6 +41,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.FutureTask; +import java.util.concurrent.locks.Lock; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -133,7 +135,6 @@ * </p> * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * * @see KeyBuilder */ @@ -168,6 +169,15 @@ final protected static String ERROR_TRANSIENT = "Transient"; /** + * An unisolated index view is in an error state. It must be discarded and + * reloaded from the current checkpoint record. 
+ * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate BTree + * objects if error occurs during eviction </a> + */ + final protected static String ERROR_ERROR_STATE = "Index is in error state"; + + /** * Log for btree opeations. */ protected static final Logger log = Logger.getLogger(AbstractBTree.class); @@ -250,7 +260,7 @@ * mutation. */ final protected boolean readOnly; - + /** * Optional cache for {@link INodeData} and {@link ILeafData} instances and * always <code>null</code> if the B+Tree is transient. @@ -258,6 +268,14 @@ protected final ILRUCache<Long, Object> storeCache; /** + * Hard reference iff the index is mutable (aka unisolated) allows us to + * avoid patterns that create short life time versions of the object to + * protect {@link ICheckpointProtocol#writeCheckpoint2()} and similar + * operations. + */ + private final IReadWriteLockManager lockManager; + + /** * The branching factor for the btree. */ final protected int branchingFactor; @@ -547,6 +565,21 @@ protected volatile AbstractNode<?> root; /** + * This field is set if an error is encountered that renders an unisolated + * index object unusable. For example, this can occur if an error was + * detected during incremental eviction of dirty nodes for a mutable index + * view since that means that there are partly serialized (and possibly + * inconsistenly serialized) evicted pages. Once this becomes non- + * <code>null</code> the index MUST be reloaded from the most recent + * checkpoint before it can be used (that is, you need to obtain a new view + * of the unisolated index since this field is sticky once set). + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate BTree + * objects if error occurs during eviction </a> + */ + protected volatile Throwable error; + + /** * An optional bloom filter that will be used to filter point tests against * <i>this</i> {@link AbstractBTree}. A bloom filter provides a strong * guarantee when it reports that a key was not found, but only a weak @@ -960,7 +993,7 @@ this.store = store; this.readOnly = readOnly; - + // /* // * The Memoizer is not used by the mutable B+Tree since it is not safe // * for concurrent operations. @@ -1042,6 +1075,8 @@ } + lockManager = ReadWriteLockManager.getLockManager(this); + } /** @@ -1977,7 +2012,8 @@ } }; - + + @Override final public Object insert(Object key, Object value) { key = metadata.getTupleSerializer().serializeKey(key); @@ -1999,6 +2035,7 @@ } + @Override final public byte[] insert(final byte[] key, final byte[] value) { if (key == null) @@ -2120,6 +2157,7 @@ } + @Override final public Object remove(Object key) { key = metadata.getTupleSerializer().serializeKey(key); @@ -2147,6 +2185,7 @@ * Remove the tuple under that key (will write a delete marker if delete * markers are enabled). */ + @Override final public byte[] remove(final byte[] key) { final Tuple tuple; @@ -2232,8 +2271,10 @@ * and dropping indices vs removing the entries in an individual * {@link AbstractBTree}. */ + @Override abstract public void removeAll(); + @Override public Object lookup(Object key) { key = metadata.getTupleSerializer().serializeKey(key); @@ -2252,6 +2293,7 @@ } + @Override public byte[] lookup(final byte[] key) { final Tuple tuple = lookup(key, getLookupTuple()); @@ -2339,6 +2381,7 @@ } + @Override public boolean contains(Object key) { key = metadata.getTupleSerializer().serializeKey(key); @@ -2359,6 +2402,7 @@ * * @todo add unit test to btree suite w/ and w/o delete markers. 
*/ + @Override public boolean contains(final byte[] key) { if (key == null) @@ -2405,6 +2449,7 @@ } + @Override public long indexOf(final byte[] key) { if (key == null) @@ -2419,6 +2464,7 @@ } + @Override public byte[] keyAt(final long index) { if (index < 0) @@ -2433,6 +2479,7 @@ } + @Override public byte[] valueAt(final long index) { final Tuple tuple = getLookupTuple(); @@ -2466,6 +2513,7 @@ * IRangeQuery */ + @Override final public long rangeCountExact(final byte[] fromKey, final byte[] toKey) { if (!metadata.getDeleteMarkers()) { @@ -2509,6 +2557,7 @@ } + @Override final public long rangeCount() { return rangeCount(null, null); @@ -2546,6 +2595,7 @@ * lookup of the both keys. If both keys are <code>null</code>, then the * cost is zero (no IOs). */ + @Override final public long rangeCount(final byte[] fromKey, final byte[] toKey) { if (fromKey == null && toKey == null) { @@ -2604,6 +2654,7 @@ * considering all sources at once. It uses a range iterator scan visiting * both deleted and undeleted tuples for that. */ + @Override public long rangeCountExactWithDeleted(final byte[] fromKey, final byte[] toKey) { @@ -2649,6 +2700,7 @@ } + @Override final public ITupleIterator rangeIterator() { return rangeIterator(null, null); @@ -2675,6 +2727,7 @@ } + @Override final public ITupleIterator rangeIterator(byte[] fromKey, byte[] toKey) { return rangeIterator(fromKey, toKey, 0/* capacity */, @@ -2767,6 +2820,7 @@ * @todo add support to the iterator construct for filtering by a tuple * revision timestamp range. */ + @Override public ITupleIterator rangeIterator(// final byte[] fromKey,// final byte[] toKey,// @@ -3115,6 +3169,7 @@ } + @Override public Object submit(final byte[] key, final ISimpleIndexProcedure proc) { // conditional range check on the key. @@ -3126,6 +3181,7 @@ } @SuppressWarnings("unchecked") + @Override public void submit(final byte[] fromKey, final byte[] toKey, final IKeyRangeIndexProcedure proc, final IResultHandler handler) { @@ -3147,6 +3203,7 @@ } @SuppressWarnings("unchecked") + @Override public void submit(final int fromIndex, final int toIndex, final byte[][] keys, final byte[][] vals, final AbstractKeyArrayIndexProcedureConstructor ctor, @@ -3369,8 +3426,68 @@ } - doSyncTouch(node); + /** + * At this point we know that the B+Tree object is a mutable data + * structure (!readOnly). If we can prove that the current thread is + * conducting a read-only operation on the B+Tree, then we DO NOT touch + * the node in order to prevent having read-only operations drive + * evictions. This test relies on the UnisolatedReadWriteIndex class to + * provide concurrency control for such interleaved read-only and + * mutation operations on an unisolated (aka mutable) index. + * + * There are three broad ways in which concurrency controls for the + * index classes are realized: + * + * (1) Explicit synchronization. For example, the AbstractJournal uses + * explicit synchronization to protect operations on the unisolated + * Name2Addr. + * + * (2) Explicit pre-declaration of ordered locks. The ConcurrencyManager + * and AbstractTask support this protection mechanism. The task runs + * once it has acquired the locks for the declared unisolated indices. + * + * (3) UnisolatedReadWriteIndex. This is used to provide transparent + * concurrency control for unisolated indices for the triple and quad + * store classes. + * + * The index is mutable (unisolated view). If the thread owns a + * read-only lock then the operation is read-only and we MUST NOT drive + * evictions from this thread. 
+ * + * Note: The order in which we obtain the real read lock and increment + * (and decrement) the per-thread read lock counter on the AbstractBTree + * is not critical because AbstractBTree.touch() relies on the thread + * both owning the read lock and having the per-thread read lock counter + * incremented for that thread. + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: + * Child does not have persistent identity </a> + */ + final int rcount = lockManager.getReadLockCount(); + + if (rcount > 0) { + + /* + * The current thread is executing a read-only operation against the + * mutable index view. DO NOT TOUCH THE EVICTION QUEUE. + */ + + // NOP + + } else { + /* + * The current thread has not promised that it is using a read-only + * operation. Either the operation is a mutation or the index is + * being managed by one of the other two concurrency control + * patterns. In any of these cases, we touch the write retention + * queue for this node reference. + */ + + doSyncTouch(node); + + } + } /** @@ -3433,7 +3550,7 @@ */ // assert isReadOnly() || ndistinctOnWriteRetentionQueue > 0; - + node.referenceCount++; if (!writeRetentionQueue.add(node)) { @@ -3598,6 +3715,15 @@ } +// private void badNode(final AbstractNode<?> node) { +//// try { +//// Thread.sleep(50); +//// } catch (InterruptedException e) { +//// // ignore; +//// } +// throw new AssertionError("ReadOnly and identity: " + node.identity); +// } + /** * Codes the node and writes the coded record on the store (non-recursive). * The node MUST be dirty. If the node has a parent, then the parent is @@ -3617,7 +3743,10 @@ * @return The persistent identity assigned by the store. */ protected long writeNodeOrLeaf(final AbstractNode<?> node) { - + + if (error != null) + throw new IllegalStateException(ERROR_ERROR_STATE, error); + assert root != null; // i.e., isOpen(). assert node != null; assert node.btree == this; @@ -3641,6 +3770,9 @@ * TestMROWTransactions might also demonstrate an issue * occasionally. If so, then check for the same root cause. */ +// if (node.isReadOnly()) { +// badNode(node); // supports debugging +// } assert !node.isReadOnly(); assertNotReadOnly(); @@ -3741,6 +3873,14 @@ // No longer dirty (prevents re-coding on re-eviction). node.setDirty(false); +// if (node.writing == null) { +// log.warn("Concurrent modification of thread guard", new RuntimeException("WTF2: " + node.hashCode())); +// +// throw new AssertionError("Concurrent modification of thread guard"); +// } + +// node.writing = null; + return 0L; } @@ -3768,7 +3908,7 @@ btreeCounters.bytesWritten += nbytes; - btreeCounters.bytesOnStore_nodesAndLeaves.addAndGet(nbytes); + btreeCounters.bytesOnStore_nodesAndLeaves.addAndGet(nbytes); } @@ -3830,6 +3970,14 @@ } +// if (node.writing == null) { +// log.warn("Concurrent modification of thread guard", new RuntimeException("WTF2: " + node.hashCode())); +// +// throw new AssertionError("Concurrent modification of thread guard"); +// } +// +// node.writing = null; + return addr; } @@ -3856,40 +4004,6 @@ if (addr == IRawStore.NULL) throw new IllegalArgumentException(); -// final Long addr2 = Long.valueOf(addr); -// -// if (storeCache != null) { -// -// // test cache : will touch global LRU iff found. -// final IAbstractNodeData data = (IAbstractNodeData) storeCache -// .get(addr); -// -// if (data != null) { -// -// // Node and Leaf MUST NOT make it into the global LRU or store -// // cache! 
-// assert !(data instanceof AbstractNode<?>); -// -// final AbstractNode<?> node; -// -// if (data.isLeaf()) { -// -// node = nodeSer.nodeFactory.allocLeaf(this, addr, -// (ILeafData) data); -// -// } else { -// -// node = nodeSer.nodeFactory.allocNode(this, addr, -// (INodeData) data); -// -// } -// -// // cache hit. -// return node; -// -// } -// -// } final ByteBuffer tmp; { @@ -3946,21 +4060,6 @@ } -// if (storeCache != null) { -// -// // update cache : will touch global LRU iff cache is modified. -// final IAbstractNodeData data2 = (IAbstractNodeData) storeCache -// .putIfAbsent(addr2, data); -// -// if (data2 != null) { -// -// // concurrent insert, use winner's value. -// data = data2; -// -// } -// -// } - // wrap as Node or Leaf. final AbstractNode<?> node = nodeSer.wrap(this, addr, data); @@ -4061,6 +4160,7 @@ /** * Returns the hard reference. */ + @Override public T get() { return ref; @@ -4070,6 +4170,7 @@ /** * Overridden as a NOP. */ + @Override public void clear() { // NOP @@ -4163,7 +4264,7 @@ */ int getMaxRecLen() { - return metadata.getMaxRecLen(); + return metadata.getMaxRecLen(); } @@ -4304,4 +4405,24 @@ } + @Override + final public Lock readLock() { + + return lockManager.readLock(); + + } + + @Override + final public Lock writeLock() { + + return lockManager.writeLock(); + + } + + @Override + final public int getReadLockCount() { + + return lockManager.getReadLockCount(); + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractNode.java 2014-09-02 20:06:22 UTC (rev 8638) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/AbstractNode.java 2014-09-03 15:12:24 UTC (rev 8639) @@ -23,7 +23,6 @@ */ /* * Created on Nov 15, 2006 - * */ package com.bigdata.btree; @@ -37,7 +36,6 @@ import com.bigdata.btree.data.IAbstractNodeData; import com.bigdata.btree.data.IKeysData; -import com.bigdata.btree.data.ISpannedTupleCountData; import com.bigdata.btree.filter.EmptyTupleIterator; import com.bigdata.btree.raba.IRaba; import com.bigdata.btree.raba.MutableKeyBuffer; @@ -51,7 +49,6 @@ * Abstract node supporting incremental persistence and copy-on-write semantics. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public abstract class AbstractNode<T extends AbstractNode /* @@ -539,7 +536,7 @@ parent = (Node) parent.copyOnWrite(oldId); } - + /* * Replace the reference to this child with the reference to the * new child. This makes the old child inaccessible via Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/BTree.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/BTree.java 2014-09-02 20:06:22 UTC (rev 8638) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/BTree.java 2014-09-03 15:12:24 UTC (rev 8639) @@ -23,7 +23,6 @@ */ /* * Created on Nov 15, 2006 - * */ package com.bigdata.btree; @@ -288,7 +287,7 @@ * and otherwise <code>null</code>. */ private final ByteArrayBuffer recordAddrBuf; - + // /** // * The last address from which the {@link IndexMetadata} record was read or // * on which it was written. @@ -385,7 +384,7 @@ */ recordAddrBuf = readOnly ? 
null : new ByteArrayBuffer(Bytes.SIZEOF_LONG); - + } /** @@ -900,23 +899,31 @@ * @see https://sourceforge.net/apps/trac/bigdata/ticket/343 * @see https://sourceforge.net/apps/trac/bigdata/ticket/440 */ -// final Lock lock = new UnisolatedReadWriteIndex(this).writeLock(); - final Lock lock = UnisolatedReadWriteIndex.getReadWriteLock(this).writeLock(); - lock.lock(); - try { + final Lock lock = writeLock(); + lock.lock(); + try { + /** + * Do not permit checkpoint if the index is in an error state. + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate + * BTree objects if error occurs during eviction </a> + */ + if (error != null) + throw new IllegalStateException(ERROR_ERROR_STATE, error); + //synchronized(this) { + if (/* autoCommit && */needsCheckpoint()) { + + /* + * Flush the btree, write a checkpoint record, and return the + * address of that checkpoint record. The [checkpoint] reference + * is also updated. + */ + + return _writeCheckpoint2(); + + } + //} - if (/* autoCommit && */needsCheckpoint()) { - - /* - * Flush the btree, write a checkpoint record, and return the - * address of that checkpoint record. The [checkpoint] reference - * is also updated. - */ - - return _writeCheckpoint2(); - - } - /* * There have not been any writes on this btree or auto-commit is * disabled. @@ -1110,14 +1117,14 @@ @Override final public long getRecordVersion() { - return recordVersion; + return recordVersion; } @Override final public long getMetadataAddr() { - return metadata.getMetadataAddr(); + return metadata.getMetadataAddr(); } @@ -1313,7 +1320,7 @@ @Override public long handleCommit(final long commitTime) { - return writeCheckpoint2().getCheckpointAddr(); + return writeCheckpoint2().getCheckpointAddr(); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java 2014-09-02 20:06:22 UTC (rev 8638) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java 2014-09-03 15:12:24 UTC (rev 8639) @@ -23,7 +23,6 @@ */ /* * Created on Nov 17, 2006 - * */ package com.bigdata.btree; @@ -61,88 +60,132 @@ } final AbstractBTree btree = node.btree; + + if (btree.error != null) { + /** + * This occurs if an error was detected against a mutable view of + * the index (the unisolated index view) and the caller has not + * discarded the index and caused it to be reloaded from the most + * recent checkpoint. + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate + * BTree objects if error occurs during eviction </a> + */ + throw new IllegalStateException(AbstractBTree.ERROR_ERROR_STATE, + btree.error); + } + + try { - // Note: This assert can be violated for a read-only B+Tree since there - // is less synchronization. - assert btree.isReadOnly() || btree.ndistinctOnWriteRetentionQueue > 0; + // Note: This assert can be violated for a read-only B+Tree since + // there is less synchronization. + assert btree.isReadOnly() || btree.ndistinctOnWriteRetentionQueue > 0; - btree.ndistinctOnWriteRetentionQueue--; - - if (node.deleted) { + btree.ndistinctOnWriteRetentionQueue--; - /* - * Deleted nodes are ignored as they are evicted from the queue. - */ + if (node.deleted) { - return; + /* + * Deleted nodes are ignored as they are evicted from the queue. + */ - } + return; - // this does not permit transient nodes to be coded. 
- if (node.dirty && btree.store != null) { -// // this causes transient nodes to be coded on eviction. -// if (node.dirty) { - - if (node.isLeaf()) { + } - /* - * A leaf is written out directly. - */ - - btree.writeNodeOrLeaf(node); + // this does not permit transient nodes to be coded. + if (node.dirty && btree.store != null) { + // // this causes transient nodes to be coded on eviction. + // if (node.dirty) { - } else { + if (node.isLeaf()) { - /* - * A non-leaf node must be written out using a post-order - * traversal so that all dirty children are written through - * before the dirty parent. This is required in order to - * assign persistent identifiers to the dirty children. - */ + /* + * A leaf is written out directly. + */ - btree.writeNodeRecursive(node); + btree.writeNodeOrLeaf(node); - } + } else { - // is a coded data record. - assert node.isCoded(); - - // no longer dirty. - assert !node.dirty; - - if (btree.store != null) { - - // object is persistent (has assigned addr). - assert ref.identity != PO.NULL; - - } - - } // isDirty + /* + * A non-leaf node must be written out using a post-order + * traversal so that all dirty children are written through + * before the dirty parent. This is required in order to + * assign persistent identifiers to the dirty children. + */ - // This does not insert into the cache. That is handled by writeNodeOrLeaf. -// if (btree.globalLRU != null) { + btree.writeNodeRecursive(node); + + } + + // is a coded data record. + assert node.isCoded(); + + // no longer dirty. + assert !node.dirty; + + if (btree.store != null) { + + // object is persistent (has assigned addr). + assert ref.identity != PO.NULL; + + } + + } // isDirty + + // This does not insert into the cache. That is handled by writeNodeOrLeaf. +// if (btree.globalLRU != null) { // -// /* -// * Add the INodeData or ILeafData object to the global LRU, NOT the -// * Node or Leaf. -// * -// * Note: The global LRU touch only occurs on eviction from the write -// * retention queue. This is nice because it limits the touches on -// * the global LRU, which could otherwise be a hot spot. We do a -// * touch whether or not the node was persisted since we are likely -// * to return to the node in either case. -// */ +// /* +// * Add the INodeData or ILeafData object to the global LRU, NOT the +// * Node or Leaf. +// * +// * Note: The global LRU touch only occurs on eviction from the write +// * retention queue. This is nice because it limits the touches on +// * the global LRU, which could otherwise be a hot spot. We do a +// * touch whether or not the node was persisted since we are likely +// * to return to the node in either case. +// */ // -// final IAbstractNodeData delegate = node.getDelegate(); +// final IAbstractNodeData delegate = node.getDelegate(); // -// assert delegate != null : node.toString(); +// assert delegate != null : node.toString(); // -// assert delegate.isCoded() : node.toString(); +// assert delegate.isCoded() : node.toString(); // -// btree.globalLRU.add(delegate); +// btree.globalLRU.add(delegate); // -// } +// } + } catch (Throwable e) { + + if (!btree.readOnly) { + + /** + * If the btree is mutable and an eviction fails, then the index + * MUST be discarded. + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> + * Invalidate BTree objects if error occurs during eviction + * </a> + */ + + btree.error = e; + + // Throw as Error. + throw new EvictionError(e); + + } + + // Launder the throwable. 
+ if (e instanceof RuntimeException) + throw (RuntimeException) e; + + throw new RuntimeException(e); + + } + } } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/EvictionError.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/EvictionError.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/EvictionError.java 2014-09-03 15:12:24 UTC (rev 8639) @@ -0,0 +1,66 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 2, 2014 + */ +package com.bigdata.btree; + +/** + * Error marks an mutable index as in an inconsistent state arising from an + * exception during eviction of a dirty node or leaf from a mutable index. The + * index MUST be reloaded from the current checkpoint record. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate BTree objects + * if error occurs during eviction </a> + */ +public class EvictionError extends IndexInconsistentError { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public EvictionError() { + } + + public EvictionError(String message) { + super(message); + } + + public EvictionError(Throwable cause) { + super(cause); + } + + public EvictionError(String message, Throwable cause) { + super(message, cause); + } + + public EvictionError(String message, Throwable cause, + boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java 2014-09-02 20:06:22 UTC (rev 8638) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java 2014-09-03 15:12:24 UTC (rev 8639) @@ -48,7 +48,7 @@ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > GIST </a> */ public interface ICheckpointProtocol extends ICommitter, ICounterSetAccess, - ISimpleIndexAccess { + ISimpleIndexAccess, IReadWriteLockManager { /** * The value of the record version number that will be assigned to the next Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IReadWriteLockManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IReadWriteLockManager.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IReadWriteLockManager.java 2014-09-03 15:12:24 UTC (rev 8639) @@ -0,0 +1,82 @@ +/** + 
+Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.btree; + +import java.util.concurrent.locks.Lock; + +/** + * Interface for managing read/write locks on persistence capable data + * structures. + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child does + * not have persistent identity </a> + */ +public interface IReadWriteLockManager { + + /** + * Return a {@link Lock} that may be used to obtain a shared read lock which + * is used (in the absence of other concurrency control mechanisms) to + * permit concurrent readers on an unisolated index while serializing access + * to that index when a writer must run. This is exposed for processes which + * need to obtain the write lock to coordinate external operations. + * <p> + * Note: If the persistence capable data structure is read-only then the + * returned {@link Lock} is a singleton that ignores all lock requests. This + * is because our read-only persistence capable data structures are already + * thread-safe for concurrent readers. + * + * @return The lock. + */ + Lock readLock(); + + /** + * Return a {@link Lock} that may be used to obtain an exclusive write lock + * which is used (in the absence of other concurrency control mechanisms) to + * serialize all processes accessing an unisolated index when a writer must + * run. This is exposed for processes which need to obtain the write lock to + * coordinate external operations. + * + * @return The lock. + * + * @throws UnsupportedOperationException + * unless the view supports mutation. + */ + Lock writeLock(); + + /** + * Return the #of read-locks held by the current thread for a mutable index + * view. + * + * @return The #of reentrant read locks held by the current thread -or- ZERO + * if the index is read-only (read locks are not tracked for a + * read-only index view). + */ + int getReadLockCount(); + + /** + * Return <code>true</code> iff the data structure is read-only. + */ + boolean isReadOnly(); + +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexInconsistentError.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexInconsistentError.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexInconsistentError.java 2014-09-03 15:12:24 UTC (rev 8639) @@ -0,0 +1,67 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 2, 2014 + */ +package com.bigdata.btree; + +/** + * Error marks an mutable index as in an inconsistent state. The index MUST be + * reloaded from the current checkpoint record. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/1005"> Invalidate BTree objects + * if error occurs during eviction </a> + */ +public class IndexInconsistentError extends Error { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public IndexInconsistentError() { + } + + public IndexInconsistentError(String message) { + super(message); + } + + public IndexInconsistentError(Throwable cause) { + super(cause); + } + + public IndexInconsistentError(String message, Throwable cause) { + super(message, cause); + } + + public IndexInconsistentError(String message, Throwable cause, + boolean enableSuppression, boolean writableStackTrace) { + + super(message, cause, enableSuppression, writableStackTrace); + + } + +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/ReadWriteLockManager.java 2014-09-03 15:12:24 UTC (rev 8639) @@ -0,0 +1,524 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 2, 2014 + */ +package com.bigdata.btree; + +import java.util.WeakHashMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import com.bigdata.journal.ICommitter; + +/** + * Base class for managing read/write locks for unisolated {@link ICommitter}s. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child does + * not have persistent identity </a> + */ +public class ReadWriteLockManager implements IReadWriteLockManager { + +// private static final Logger log = Logger.getLogger(ReadWriteLockManager.class); + + /** + * The #of milliseconds that the class will wait for a read or write lock. A + * (wrapped) {@link InterruptedException} will be thrown if this timeout is + * exceeded. The default is {@value #LOCK_TIMEOUT_MILLIS} milliseconds. Use + * {@link Long#MAX_VALUE} for no timeout. + * + * TODO There may be no reason to have a timeout when waiting for a lock in + * which case we can get rid of this field. Also, there is no means + * available to configure the timeout (in a similar fashion you can not + * configure the fairness policy for the {@link ReentrantReadWriteLock}). + * <p> + * If we get rid of this field, then the {@link WrappedReadLock} and + * {@link WrappedWriteLock} classes can be simplified to have normal lock + * semantics rather than tryLock() based semantics. + */ + private static final long LOCK_TIMEOUT_MILLIS = Long.MAX_VALUE;// 10000; + + /** + * The unisolated persistence capable data structure. + */ + final private ICheckpointProtocol committer; + + /** + * True iff the caller's {@link ICheckpointProtocol} object was read-only. + */ + final private boolean readOnly; + + /** + * The {@link Lock} used to permit concurrent readers on an unisolated index + * while serializing access to that index when a writer must run. + */ + final private WrappedWriteLock writeLock; + + /** + * The {@link Lock} ensures that any code path that obtains the read lock + * also maintains the per-thread read-lock counter. + */ + final private Lock readLock; + + /** + * Canonicalizing mapping for the {@link ReadWriteLockManager} objects. + */ + static final private WeakHashMap<ICommitter, ReadWriteLockManager> locks = new WeakHashMap<ICommitter, ReadWriteLockManager>(); + + @Override + public int getReadLockCount() { + + if (readOnly) { + + // No locks are actually taken. + return 0; + + } + + // Return the locks actually held by this thread. + final Integer readLockCounter = ((WrappedReadLock) readLock).threadLockMap + .get(Thread.currentThread().getId()); + + if (readLockCounter == null) { + + // No read locks are held. + return 0; + + } + + return readLockCounter.intValue(); + + } + + @Override + public Lock readLock() { + + return readLock; + + } + + @Override + public Lock writeLock() { + + if (readOnly) + throw new UnsupportedOperationException( + AbstractBTree.ERROR_READ_ONLY); + + return writeLock; + + } + + @Override + public boolean isReadOnly() { + + /* + * Note: This method is grounded out without delegation to avoid + * recursion through the target persistence capable data structure. + */ + + return readOnly; + + } + + /** + * {@link WrappedReadLock} is used to intercept lock/unlock calls to the + * readLock to trigger calls to the logic that tracks the #of reentrant + * read-locks by read and which can be used to identify whether the readlock + * is held by the current thread. + * <p> + * This is tested in the touch() methods for the BTree and HTree classe to + * determine whether the touch should be ignored or trigger potential + * evictions. 
+ * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child + * does not have persistent identity </a> + */ + private class WrappedReadLock implements Lock { + + private final Lock delegate; + + /** + * Maintain count of readLocks on by Thread. This is used to avoid having + * read-only operations protected by an {@link ReadWriteLockManager} + * causing evictions of dirty nodes and leaves. + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child + * does not have persistent identity </a> + */ + private final ConcurrentHashMap<Long, Integer> threadLockMap; + + /** + * Track the #of read locks by thread IFF this is a read/write index + * view. + */ + private void readLockedThread() { + final long thisThreadId = Thread.currentThread().getId(); + final Integer entry = threadLockMap.get(thisThreadId); + final Integer newVal = entry == null ? 1 : 1 + entry.intValue(); + threadLockMap.put(thisThreadId, newVal); + } + + /** + * Track the #of read locks by thread IFF this is a read/write index + * view. + */ + private void readUnlockedThread() { + final long thisThreadId = Thread.currentThread().getId(); + final Integer entry = threadLockMap.get(thisThreadId); + assert entry != null; + if (entry.intValue() == 1) { + threadLockMap.remove(thisThreadId); + } else { + threadLockMap.put(thisThreadId, entry.intValue() - 1); + } + } + + WrappedReadLock(final Lock delegate) { + + if (delegate == null) + throw new IllegalArgumentException(); + + this.delegate = delegate; + + /* + * Configure parallelism default. + * + * Note: This CHM is ONLY used by mutable index views. So what + * matters here is the #of threads that contend for a mutable index + * view. I suspect that this is significantly fewer threads than we + * observe for concurrent read-only index views. Therefore I have + * set the parameters for the map based on the notion that only a + * few threads are contending for the mutable index object in order + * to reduce the heap burden associated with these CHM instances. If + * this map is observed to be hot spot, then we can simply use the + * defaults (initialCapacity = concurrencyLevel = 16). We only have + * this for the mutable index views and there are typically not that + * many instances of those open at the same time. + */ + final int initialCapacity = 4; + final int concurrencyLevel = initialCapacity; + final float loadFactor = .75f; + this.threadLockMap = new ConcurrentHashMap<Long, Integer>( + initialCapacity, loadFactor, concurrencyLevel); + + } + + @Override + public void lock() { + try { + /* + * Note: The original UnisolatedReadWriteLock semantics are + * always those of a tryLock with a default timeout. Make sure + * that we keep this in place! + */ + lockInterruptibly(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + @Override + public void lockInterruptibly() throws InterruptedException { + /* + * Note: The order in which we obtain the real read lock and + * increment (and decrement) the per-thread read lock counter on the + * AbstractBTree is not critical because AbstractBTree.touch() + * relies on the thread both owning the read lock and having the + * per-thread read lock counter incremented for that thread. + * + * Note: The original UnisolatedReadWriteLock semantics are always + * those of a tryLock with a default timeout. Make sure that we keep + * this in place! 
+ */ +// delegate.lock(); + if (!delegate.tryLock(LOCK_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) { + throw new RuntimeException("Timeout"); + } + readLockedThread(); + } + + @Override + public boolean tryLock() { + final boolean ret = delegate.tryLock(); + if (ret) { + readLockedThread(); + } + return ret; + } + + @Override + public boolean tryLock(final long time, final TimeUnit unit) + throws InterruptedException { + final boolean ret = delegate.tryLock(time, unit); + if (ret) { + readLockedThread(); + } + return ret; + } + + @Override + public void unlock() { + /* + * Note: The unlock order does not really matter. See the + * comments on lock() and AbstractBTree.touch(). + */ + delegate.unlock(); + /* + * Do this after the unlock() in case the lock/unlock are not + * correctly paired. + */ + readUnlockedThread(); + } + + @Override + public Condition newCondition() { + return delegate.newCondition(); + } + + } // class WrappedReadLock + + /** + * Wraps the write lock to provide interruptable tryLock() with timeout + * semantics for all write lock acquisitions. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child + * does not have persistent identity </a> + */ + private class WrappedWriteLock implements Lock { + + private final Lock delegate; + + WrappedWriteLock(final Lock delegate) { + + if (delegate == null) + throw new IllegalArgumentException(); + + this.delegate = delegate; + + } + + @Override + public void lock() { + try { + /* + * Note: The original UnisolatedReadWriteLock semantics are + * always those of a tryLock with a default timeout. Make sure + * that we keep this in place! + */ + lockInterruptibly(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + @Override + public void lockInterruptibly() throws InterruptedException { + /* + * Note: The original UnisolatedReadWriteLock semantics are always + * those of a tryLock with a default timeout. Make sure that we keep + * this in place! + */ +// delegate.lock(); + if (!delegate.tryLock(LOCK_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) { + throw new RuntimeException("Timeout"); + } + } + + @Override + public boolean tryLock() { + return delegate.tryLock(); + } + + @Override + public boolean tryLock(final long time, final TimeUnit unit) + throws InterruptedException { + return delegate.tryLock(time, unit); + } + + @Override + public void unlock() { + delegate.unlock(); + } + + @Override + public Condition newCondition() { + return delegate.newCondition(); + } + + } // class WrappedWriteLock + + /** + * Class used for read lock for read-only data structures. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private static class ConcurrentReaderLock implements Lock { + + @Override + public void lock() { + // NOP + } + + @Override + public void lockInterruptibly() throws InterruptedException { + // NOP + } + + @Override + public boolean tryLock() { + return true; + } + + @Override + public boolean tryLock(long time, TimeUnit unit) + throws InterruptedException { + return true; + } + + @Override + public void unlock() { + // NOP + } + + @Override + public Condition newCondition() { + throw new UnsupportedOperationException(); + } + + } + private static final Lock READ_ONLY_LOCK = new ConcurrentReaderLock(); + + /** + * Canonicalizing factory for the {@link ReadWriteLock} for an + * {@link ICommitter}. 
+ * <p> + * Note: This method CAN NOT be exposed since that breaks encapsulation for + * the {@link WrappedReadLock}. + * + * @param index + * The btree. + * @return The lock. + * + * @throws IllegalArgumentException + * if the argument is <code>null</code>. + */ + static public ReadWriteLockManager getLockManager( + final ICheckpointProtocol index) { + + if (index == null) + throw new IllegalArgumentException(); + + synchronized (locks) { + + ReadWriteLockManager lockManager = locks.get(index); + + if (lockManager == null) { + + lockManager = new ReadWriteLockManager(index); + + locks.put(index, lockManager); + + } + + return lockManager; + } + + } + + /** + * Note: ONLY accessed through the canonicalizing pattern! + */ + private ReadWriteLockManager(final ICheckpointProtocol index) { + + this.committer = index; + + if (this.readOnly = index.isReadOnly()) { + + /* + * Since the index does not allow mutation, wrap with a NOP lock. + * + * Note: Concurrent readers are automatically supported by our + * persistent capable data structures so we return a NOP Lock + * implementation if the data structure is read-only. Also note that + * read-only data structures are (by definition) not mutable so we + * do not need to track the #of reentrant locks held for a read-only + * data structure (per above). + */ + this.readLock = READ_ONLY_LOCK; + + this.writeLock = null; + + } else { + + /* + * Note: fairness is NOT required for the locks. I believe that this + * is supposed to provide better throughput, but that has not been + * tested. Also, this has not been tested with a simple mutex lock + * vs a read-write lock. The use case for which this class was + * originally developed was computing the fix point of a set of + * rules. In that use case, we do a lot of concurrent reading and + * periodically flush the computed solutions onto the relations. It + * is likely that a read-write lock will do well for this situation. + */ + final ReadWriteLock readWriteLock = new ReentrantReadWriteLock( + false/* fair */); + + /** + * If the index allows mutation, then wrap with tryLock() and + * lock-counting semantics. This allows us to test for the #of + * reentrant locks held by the current thread in + * AbstractBTree.touch() and is the primary basis for the fix the + * ticket below. + * + * @see <a href="http://trac.bigdata.com/ticket/855"> + * AssertionError: Child does not have persistent identity </a> + */ + this.readLock = new WrappedReadLock(readWriteLock.readLock()); + + // Wrap with tryLock() semantics. 
+ this.writeLock = new WrappedWriteLock(readWriteLock.writeLock()); + + } + + } + + @Override + final public String toString() { + + return getClass().getName() + "{committer=" + committer + ",readOnly=" + + readOnly + "}"; + + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java 2014-09-02 20:06:22 UTC (rev 8638) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java 2014-09-03 15:12:24 UTC (rev 8639) @@ -23,20 +23,14 @@ */ /* * Created on Jan 10, 2008 - * */ package com.bigdata.btree; import java.util.Iterator; -import java.util.WeakHashMap; -import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.log4j.Logger; - import com.bigdata.bop.cost.BTreeCostModel; import com.bigdata.bop.cost.DiskCostModel; import com.bigdata.bop.cost.ScanCostReport; @@ -48,10 +42,7 @@ import com.bigdata.btree.view.FusedView; import com.bigdata.counters.CounterSet; import com.bigdata.journal.ConcurrencyManager; -import com.bigdata.journal.ICommitter; import com.bigdata.journal.IConcurrencyManager; -import com.bigdata.journal.Journal; -import com.bigdata.journal.TemporaryStore; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.service.Split; @@ -120,124 +111,52 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ -public class UnisolatedReadWriteIndex implements IIndex, ILinearList { +public class UnisolatedReadWriteIndex implements IIndex, ILinearList, + IReadWriteLockManager { - private static final Logger log = Logger.getLogger(UnisolatedReadWriteIndex.class); - /** - * The #of milliseconds that the class will wait for a read or write lock. A - * (wrapped) {@link InterruptedException} will be thrown if this timeout is - * exceeded. The default is {@value #LOCK_TIMEOUT_MILLIS} milliseconds. Use - * {@link Long#MAX_VALUE} for no timeout. - * - * @todo There may be no reason to have a timeout when waiting for a lock in - * which case we can get rid of this field. Also, there is no means - * available to configure the timeout (in a similar fashion you can - * not configure the fairness policy for the - * {@link ReentrantReadWriteLock}). + * The object that manages the locks for the associated index. */ - protected static final long LOCK_TIMEOUT_MILLIS = Long.MAX_VALUE;// 10000; + private final ReadWriteLockManager lockManager; - /** - * An exclusive write lock used (in the absence of other concurrency control - * mechanisms) to serialize all processes accessing an unisolated index when - * a writer must run. This is automatically obtained by methods on this - * class which will write on the underlying {@link IIndex}. It is exposed - * for processes which need to obtain the write lock to coordinate external - * operations. - * - * @return The acquired lock. - */ + @Override + public Lock readLock() { + return lockManager.readLock(); + } + + @Override public Lock writeLock() { - - final Lock writeLock = readWriteLock.writeLock(); - - try { - - if(log.isDebugEnabled()) { - - log.debug(ndx.toString()); - - } - -// writeLock.lock(); - - if(!writeLock.tryLock( LOCK_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) { ... [truncated message content] |
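The heart of the #855 fix is the per-thread read-lock counter maintained by WrappedReadLock: AbstractBTree.touch() can cheaply ask whether the current thread holds a read lock and, if so, skip the write retention queue so that read-only operations never drive evictions of dirty nodes or leaves. The following condensed sketch illustrates the pattern; the class and method names are hypothetical, and the get/put counter updates mirror the committed style (safe because each thread only ever touches its own entry).

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical condensed sketch of the per-thread read-lock counting
// pattern used by ReadWriteLockManager / WrappedReadLock in r8639.
public class CountingReadLock {

    private final ReentrantReadWriteLock rwLock =
            new ReentrantReadWriteLock(false/* fair */);

    // #of reentrant read locks held, keyed by thread id. Only the owning
    // thread reads or writes its own entry, so get/put pairs are race-free.
    private final ConcurrentHashMap<Long, Integer> readLocksByThread =
            new ConcurrentHashMap<Long, Integer>();

    public void lockRead() {
        rwLock.readLock().lock();
        final Long id = Thread.currentThread().getId();
        final Integer n = readLocksByThread.get(id);
        readLocksByThread.put(id, n == null ? 1 : n.intValue() + 1);
    }

    public void unlockRead() {
        rwLock.readLock().unlock();
        final Long id = Thread.currentThread().getId();
        final Integer n = readLocksByThread.get(id);
        if (n == null || n.intValue() == 1) {
            readLocksByThread.remove(id);
        } else {
            readLocksByThread.put(id, Integer.valueOf(n.intValue() - 1));
        }
    }

    // #of reentrant read locks held by the current thread (0 if none).
    public int getReadLockCount() {
        final Integer n = readLocksByThread.get(Thread.currentThread().getId());
        return n == null ? 0 : n.intValue();
    }

    // In spirit, how AbstractBTree.touch() consults the counter: a caller
    // holding a read lock is performing a read-only operation against the
    // mutable index view, so the touch becomes a NOP and reads never push
    // dirty nodes toward eviction.
    public boolean shouldTouchWriteRetentionQueue() {
        return getReadLockCount() == 0;
    }
}

Note the unlock order in the sketch (delegate lock first, counter second) follows the committed code, whose comments observe that the exact ordering is not critical: touch() requires both that the thread owns the read lock and that its per-thread counter is non-zero.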
From: <mrp...@us...> - 2014-09-02 20:06:31
Revision: 8638 http://sourceforge.net/p/bigdata/code/8638 Author: mrpersonick Date: 2014-09-02 20:06:22 +0000 (Tue, 02 Sep 2014) Log Message: ----------- Ticket #714: Sesame 2.7. Tidying up some CI loose ends. Modified Paths: -------------- branches/SESAME_2_7/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestStrAfterBOp.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNegation.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/AbstractBigdataExprBuilderTestCase.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestBigdataExprBuilder.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestBindingsClause.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestGroupGraphPatternBuilder.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestSubqueryPatterns.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestTriplePatternBuilder.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestUpdateExprBuilder.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestValueExprBuilder.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestVirtualGraphs.java Modified: branches/SESAME_2_7/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java =================================================================== --- branches/SESAME_2_7/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-gas/src/java/com/bigdata/rdf/graph/util/AbstractGraphFixture.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -28,6 +28,7 @@ SailConnection cxn = null; try { cxn = getSail().getConnection(); + cxn.begin(); newSailGraphLoader(cxn).loadGraph(null/* fallback */, resources); cxn.commit(); ok = true; Modified: branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestStrAfterBOp.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestStrAfterBOp.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/internal/constraints/TestStrAfterBOp.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -143,7 +143,7 @@ // strbefore("abc"^^xsd:string,"") -> ""^^xsd:string { final IV expected = DummyConstantNode.toDummyIV(vf - .createLiteral("", XSD.STRING)); + .createLiteral("abc", XSD.STRING)); final IV arg1 = DummyConstantNode.toDummyIV(vf .createLiteral("abc", XSD.STRING)); Modified: branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNegation.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNegation.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNegation.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -36,6 +36,7 @@ import com.bigdata.rdf.model.BigdataURI; import com.bigdata.rdf.model.BigdataValue; import com.bigdata.rdf.model.BigdataValueFactory; +import com.bigdata.rdf.sail.sparql.PrefixDeclProcessor; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.AssignmentNode; import 
com.bigdata.rdf.sparql.ast.ConstantNode; @@ -511,7 +512,7 @@ // Prefix declarations. { - expected.setPrefixDecls((Map)Collections.emptyMap()); + expected.setPrefixDecls(PrefixDeclProcessor.defaultDecls); } // Top-level projection. Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/AbstractBigdataExprBuilderTestCase.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/AbstractBigdataExprBuilderTestCase.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/AbstractBigdataExprBuilderTestCase.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -69,7 +69,7 @@ public class AbstractBigdataExprBuilderTestCase extends TestCase { private static final Logger log = Logger - .getLogger(TestBigdataExprBuilder.class); + .getLogger(AbstractBigdataExprBuilderTestCase.class); /** * Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestBigdataExprBuilder.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestBigdataExprBuilder.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestBigdataExprBuilder.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -96,7 +96,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -133,7 +133,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -171,7 +171,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -212,7 +212,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -262,7 +262,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -314,7 +314,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -370,7 +370,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -422,7 +422,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -466,7 +466,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final 
Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -511,7 +511,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -551,7 +551,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -596,7 +596,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -645,7 +645,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -687,7 +687,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -732,7 +732,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -775,7 +775,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -821,7 +821,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -864,7 +864,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("rdf", RDF.NAMESPACE); expected.setPrefixDecls(prefixDecls); } @@ -908,7 +908,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("rdf", RDF.NAMESPACE); expected.setPrefixDecls(prefixDecls); } @@ -973,7 +973,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("foaf", FOAFVocabularyDecl.NAMESPACE); prefixDecls.put("dc", DC.NAMESPACE); expected.setPrefixDecls(prefixDecls); @@ -1067,7 +1067,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("foaf", FOAFVocabularyDecl.NAMESPACE); prefixDecls.put("dc", DC.NAMESPACE); expected.setPrefixDecls(prefixDecls); @@ -1180,7 +1180,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, 
String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("foaf", FOAFVocabularyDecl.NAMESPACE); } Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestBindingsClause.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestBindingsClause.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestBindingsClause.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -113,7 +113,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("", "http://example.org/book/"); expected.setPrefixDecls(prefixDecls); } @@ -196,7 +196,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("", "http://example.org/book/"); expected.setPrefixDecls(prefixDecls); } @@ -282,7 +282,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("", "http://example.org/book/"); expected.setPrefixDecls(prefixDecls); } @@ -374,7 +374,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("", "http://example.org/book/"); expected.setPrefixDecls(prefixDecls); } @@ -463,7 +463,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("", "http://example.org/book/"); expected.setPrefixDecls(prefixDecls); } Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestGroupGraphPatternBuilder.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestGroupGraphPatternBuilder.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestGroupGraphPatternBuilder.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -107,7 +107,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -141,7 +141,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -184,7 +184,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -225,7 +225,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new 
LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -277,7 +277,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -330,7 +330,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -385,7 +385,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -447,7 +447,7 @@ final VarNode o = new VarNode("o"); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -522,7 +522,7 @@ final VarNode o = new VarNode("o"); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -599,7 +599,7 @@ final VarNode o = new VarNode("o"); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -659,7 +659,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -702,7 +702,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -747,7 +747,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -794,7 +794,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -844,7 +844,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -895,7 +895,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -941,7 +941,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -989,7 +989,7 @@ { { - final Map<String, 
String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -1038,7 +1038,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -1092,7 +1092,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -1148,7 +1148,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -1203,7 +1203,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -1259,7 +1259,7 @@ final QueryRoot expected = new QueryRoot(QueryType.SELECT); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); { prefixDecls.put("", "http://www.bigdata.com/"); expected.setPrefixDecls(prefixDecls); @@ -1314,8 +1314,8 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); - expected.setPrefixDecls(prefixDecls); +// final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); +// expected.setPrefixDecls(prefixDecls); } { Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestSubqueryPatterns.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestSubqueryPatterns.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestSubqueryPatterns.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -104,7 +104,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -169,7 +169,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -235,7 +235,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -301,7 +301,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -371,7 +371,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, 
String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -454,7 +454,7 @@ makeIV(valueFactory.createLiteral("12", XSD.INTEGER))); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -566,7 +566,7 @@ final VarNode anonvar = mockAnonVar("-exists-1"); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("rdf", RDF.NAMESPACE); prefixDecls.put("foaf", FOAFVocabularyDecl.NAMESPACE); expected.setPrefixDecls(prefixDecls); @@ -643,7 +643,7 @@ final VarNode anonvar = mockAnonVar("-exists-1"); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("rdf", RDF.NAMESPACE); prefixDecls.put("foaf", FOAFVocabularyDecl.NAMESPACE); expected.setPrefixDecls(prefixDecls); @@ -732,7 +732,7 @@ .stringValue()))); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("rdfs", RDFS.NAMESPACE); expected.setPrefixDecls(prefixDecls); } @@ -839,7 +839,7 @@ // .stringValue()))); // // { -// final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); +// final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); // prefixDecls.put("rdfs", RDFS.NAMESPACE); // expected.setPrefixDecls(prefixDecls); // } @@ -947,7 +947,7 @@ // .stringValue()))); // // { -// final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); +// final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); // prefixDecls.put("rdfs", RDFS.NAMESPACE); // expected.setPrefixDecls(prefixDecls); // } Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestTriplePatternBuilder.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestTriplePatternBuilder.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestTriplePatternBuilder.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -92,7 +92,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -137,7 +137,7 @@ final VarNode o = new VarNode("o"); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -181,7 +181,7 @@ final VarNode o = new VarNode("o"); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -225,7 +225,7 @@ makeIV(valueFactory.createURI("http://www.bigdata.com/o"))); { - final Map<String, 
String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -270,7 +270,7 @@ makeIV(valueFactory.createURI("http://www.bigdata.com/o"))); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -315,7 +315,7 @@ final VarNode o = new VarNode("o"); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -363,7 +363,7 @@ final VarNode x = new VarNode("x"); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -399,7 +399,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -439,7 +439,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -495,7 +495,7 @@ makeIV(valueFactory.createURI("http://www.bigdata.com/bar"))); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -540,7 +540,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -596,7 +596,7 @@ makeIV(valueFactory.createURI("http://www.bigdata.com/goo"))); { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -647,7 +647,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } @@ -703,7 +703,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("", "http://example.org/"); } @@ -769,7 +769,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("", "http://example.org/"); } @@ -840,7 +840,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); 
expected.setPrefixDecls(prefixDecls); prefixDecls.put("", "http://example.org/"); } @@ -920,7 +920,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("", "http://example.org/"); } @@ -1008,7 +1008,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("foaf", FOAFVocabularyDecl.NAMESPACE); } @@ -1105,7 +1105,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("", "http://example.org/ns#"); prefixDecls.put("rdf", RDF.NAMESPACE); @@ -1220,7 +1220,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("", "http://example.org/ns#"); prefixDecls.put("rdf", RDF.NAMESPACE); @@ -1330,7 +1330,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("xsd", XSD.NAMESPACE); prefixDecls.put("", "http://example.org/ns#"); @@ -1432,7 +1432,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("", "http://example.org/ns#"); } @@ -1544,7 +1544,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("", "http://example.org/ns#"); } @@ -1653,7 +1653,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("dc", DCElementsVocabularyDecl.NAMESPACE); } @@ -1733,7 +1733,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("dc", DCElementsVocabularyDecl.NAMESPACE); } @@ -1805,7 +1805,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("dc", DCElementsVocabularyDecl.NAMESPACE); } @@ -1876,7 +1876,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); prefixDecls.put("dc", 
DCElementsVocabularyDecl.NAMESPACE); } Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestUpdateExprBuilder.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestUpdateExprBuilder.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestUpdateExprBuilder.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -1573,7 +1573,7 @@ + "{\n" + " <http://example/book1> dc:title \"A new book\" .\n" + " <http://example/book1> dc:creator \"A.N.Other\" .\n" // - + " GRAPH <http://example/bookStore> { <http://example/book1> ns:price 42 }\n" + + " <http://example/book1> ns:price 42 <http://example/bookStore> .\n" + "}"; final UpdateRoot expected = new UpdateRoot(); Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestValueExprBuilder.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestValueExprBuilder.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestValueExprBuilder.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -214,7 +214,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); expected.setPrefixDecls(prefixDecls); } Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestVirtualGraphs.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestVirtualGraphs.java 2014-09-02 19:28:35 UTC (rev 8637) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestVirtualGraphs.java 2014-09-02 20:06:22 UTC (rev 8638) @@ -191,7 +191,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("foaf", FOAFVocabularyDecl.NAMESPACE); prefixDecls.put("dc", DC.NAMESPACE); expected.setPrefixDecls(prefixDecls); @@ -345,7 +345,7 @@ { { - final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String, String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); prefixDecls.put("foaf", FOAFVocabularyDecl.NAMESPACE); prefixDecls.put("dc", DC.NAMESPACE); expected.setPrefixDecls(prefixDecls); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
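The change running through every test file above is mechanical: where the expected AST previously started from an empty prefix map, it now copies the parser's built-in declarations via the LinkedHashMap copy constructor. A minimal sketch of the resulting test idiom, assuming the bigdata test classpath (QueryRoot, QueryType and PrefixDeclProcessor packages are inferred from the imports visible in the surrounding diffs, and the "dc" declaration stands in for whatever the query under test declares explicitly):

import java.util.LinkedHashMap;
import java.util.Map;

import org.openrdf.model.vocabulary.DC;

import com.bigdata.rdf.sail.sparql.PrefixDeclProcessor;
import com.bigdata.rdf.sparql.ast.QueryRoot;
import com.bigdata.rdf.sparql.ast.QueryType;

public void test_prefixDecls_seededFromDefaults() {

    final QueryRoot expected = new QueryRoot(QueryType.SELECT);

    // Copy the parser's built-in declarations first; LinkedHashMap
    // preserves their insertion order in the copy.
    final Map<String, String> prefixDecls =
            new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls);

    // Prefix declared explicitly by the query under test.
    prefixDecls.put("dc", DC.NAMESPACE);

    expected.setPrefixDecls(prefixDecls);

}

Since Map.equals() ignores iteration order, it does not matter that PrefixDeclProcessor.process() inserts the defaults after the query's own declarations while the tests copy the defaults first: the expected and actual maps still compare equal.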
From: <mrp...@us...> - 2014-09-02 19:28:40
|
Revision: 8637 http://sourceforge.net/p/bigdata/code/8637 Author: mrpersonick Date: 2014-09-02 19:28:35 +0000 (Tue, 02 Sep 2014) Log Message: ----------- Ticket #714: Sesame 2.7. Tidying up some CI loose ends. Modified Paths: -------------- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparql10QueryBuilder.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparql11QueryBuilder.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/PrefixDeclProcessor.java Modified: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java 2014-09-02 16:01:51 UTC (rev 8636) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java 2014-09-02 19:28:35 UTC (rev 8637) @@ -953,6 +953,10 @@ // final Map<String, BindingsClause> nsBindingsClauses, final List<BindingsClause> bindingsClauses) { + if (group == null) { + return; + } + if (group instanceof JoinGroupNode && ((JoinGroupNode) group).isOptional()) { return; } Modified: branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparql10QueryBuilder.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparql10QueryBuilder.java 2014-09-02 16:01:51 UTC (rev 8636) +++ branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparql10QueryBuilder.java 2014-09-02 19:28:35 UTC (rev 8637) @@ -50,6 +50,7 @@ import com.bigdata.rdf.model.BigdataURI; import com.bigdata.rdf.model.BigdataValue; import com.bigdata.rdf.sail.sparql.AbstractBigdataExprBuilderTestCase; +import com.bigdata.rdf.sail.sparql.PrefixDeclProcessor; import com.bigdata.rdf.sparql.ast.ConstantNode; import com.bigdata.rdf.sparql.ast.FilterNode; import com.bigdata.rdf.sparql.ast.FunctionNode; @@ -157,7 +158,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); { prefixDecls.put("foo", "http://www.bigdata.com/foo"); } @@ -229,7 +230,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); { prefixDecls.put("foo", "http://www.bigdata.com/foo"); } @@ -316,7 +317,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); { prefixDecls.put("foo", "http://www.bigdata.com/foo"); } @@ -463,7 +464,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?book ?p ?o}"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); { prefixDecls.put("", 
"http://example.org/book/"); } @@ -652,7 +653,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); final ServiceNode serviceNode = new ServiceNode(new ConstantNode( makeIV(serviceURI)), groupNode); @@ -763,7 +764,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); final ServiceNode serviceNode = new ServiceNode(new ConstantNode( makeIV(serviceURI)), groupNode); @@ -888,7 +889,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o . ?s ?p ?o1 }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); final ServiceNode serviceNode = new ServiceNode(new ConstantNode( makeIV(serviceURI)), groupNode); @@ -1023,7 +1024,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); final ServiceNode serviceNode = new ServiceNode(new ConstantNode( makeIV(serviceURI)), groupNode); @@ -1149,7 +1150,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); final ServiceNode serviceNode = new ServiceNode(new ConstantNode( makeIV(serviceURI)), groupNode); @@ -1358,7 +1359,7 @@ // // final String exprImage = "SERVICE <" + serviceURI + "> { [] ?p ?o }"; // -// final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); +// final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); // // final ServiceNode serviceNode = new ServiceNode(new ConstantNode( // makeIV(serviceURI)), groupNode); Modified: branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparql11QueryBuilder.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparql11QueryBuilder.java 2014-09-02 16:01:51 UTC (rev 8636) +++ branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparql11QueryBuilder.java 2014-09-02 19:28:35 UTC (rev 8637) @@ -56,6 +56,7 @@ import com.bigdata.rdf.model.BigdataURI; import com.bigdata.rdf.model.BigdataValue; import com.bigdata.rdf.sail.sparql.AbstractBigdataExprBuilderTestCase; +import com.bigdata.rdf.sail.sparql.PrefixDeclProcessor; import com.bigdata.rdf.sparql.ast.BindingsClause; import com.bigdata.rdf.sparql.ast.ConstantNode; import com.bigdata.rdf.sparql.ast.FilterNode; @@ -169,7 +170,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); { prefixDecls.put("foo", "http://www.bigdata.com/foo"); } @@ 
-249,7 +250,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); { prefixDecls.put("foo", "http://www.bigdata.com/foo"); } @@ -345,7 +346,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); { prefixDecls.put("foo", "http://www.bigdata.com/foo"); } @@ -477,7 +478,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?book ?p ?o}"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); { prefixDecls.put("", "http://example.org/book/"); } @@ -624,7 +625,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); final ServiceNode serviceNode = new ServiceNode(new ConstantNode( makeIV(serviceURI)), groupNode); @@ -728,7 +729,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); final ServiceNode serviceNode = new ServiceNode(new ConstantNode( makeIV(serviceURI)), groupNode); @@ -854,7 +855,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o . 
?s ?p ?o1 }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); final ServiceNode serviceNode = new ServiceNode(new ConstantNode( makeIV(serviceURI)), groupNode); @@ -985,7 +986,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); final ServiceNode serviceNode = new ServiceNode(new ConstantNode( makeIV(serviceURI)), groupNode); @@ -1073,7 +1074,7 @@ final String exprImage = "SERVICE <" + serviceURI + "> { ?s ?p ?o }"; - final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(); + final Map<String,String> prefixDecls = new LinkedHashMap<String, String>(PrefixDeclProcessor.defaultDecls); final ServiceNode serviceNode = new ServiceNode(new ConstantNode( makeIV(serviceURI)), groupNode); Modified: branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/PrefixDeclProcessor.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/PrefixDeclProcessor.java 2014-09-02 16:01:51 UTC (rev 8636) +++ branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/PrefixDeclProcessor.java 2014-09-02 19:28:35 UTC (rev 8637) @@ -12,7 +12,9 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.openrdf.model.vocabulary.DC; import org.openrdf.model.vocabulary.FN; +import org.openrdf.model.vocabulary.FOAF; import org.openrdf.model.vocabulary.OWL; import org.openrdf.model.vocabulary.RDF; import org.openrdf.model.vocabulary.RDFS; @@ -45,6 +47,23 @@ */ public class PrefixDeclProcessor { + public static final Map<String,String> defaultDecls = + new LinkedHashMap<String, String>(); + + static { + defaultDecls.put("rdf", RDF.NAMESPACE); + defaultDecls.put("rdfs", RDFS.NAMESPACE); + defaultDecls.put("sesame", SESAME.NAMESPACE); + defaultDecls.put("owl", OWL.NAMESPACE); + defaultDecls.put("xsd", XMLSchema.NAMESPACE); + defaultDecls.put("fn", FN.NAMESPACE); + defaultDecls.put("foaf", FOAF.NAMESPACE); + defaultDecls.put("dc", DC.NAMESPACE); + defaultDecls.put("hint", QueryHints.NAMESPACE); + defaultDecls.put("bd", BD.NAMESPACE); + defaultDecls.put("bds", BDS.NAMESPACE); + } + /** * Processes prefix declarations in queries. This method collects all * prefixes that are declared in the supplied query, verifies that prefixes @@ -59,7 +78,7 @@ * If the query contains redefined prefixes or qnames that use * undefined prefixes. 
*/ - public static Map<String, String> process(ASTOperationContainer qc) + public static Map<String, String> process(ASTOperationContainer qc) throws MalformedQueryException { List<ASTPrefixDecl> prefixDeclList = qc.getPrefixDeclList(); @@ -78,34 +97,37 @@ prefixMap.put(prefix, iri); } - // insert some default prefixes (if not explicitly defined in the query) - insertDefaultPrefix(prefixMap, "rdf", RDF.NAMESPACE); - insertDefaultPrefix(prefixMap, "rdfs", RDFS.NAMESPACE); - insertDefaultPrefix(prefixMap, "sesame", SESAME.NAMESPACE); - insertDefaultPrefix(prefixMap, "owl", OWL.NAMESPACE); - insertDefaultPrefix(prefixMap, "xsd", XMLSchema.NAMESPACE); - insertDefaultPrefix(prefixMap, "fn", FN.NAMESPACE); - insertDefaultPrefix(prefixMap, "hint", QueryHints.NAMESPACE); - insertDefaultPrefix(prefixMap, "bd", BD.NAMESPACE); - insertDefaultPrefix(prefixMap, "bds", BDS.NAMESPACE); + // insert some default prefixes (if not explicitly defined in the query) +// insertDefaultPrefix(prefixMap, "rdf", RDF.NAMESPACE); +// insertDefaultPrefix(prefixMap, "rdfs", RDFS.NAMESPACE); +// insertDefaultPrefix(prefixMap, "sesame", SESAME.NAMESPACE); +// insertDefaultPrefix(prefixMap, "owl", OWL.NAMESPACE); +// insertDefaultPrefix(prefixMap, "xsd", XMLSchema.NAMESPACE); +// insertDefaultPrefix(prefixMap, "fn", FN.NAMESPACE); +// insertDefaultPrefix(prefixMap, "hint", QueryHints.NAMESPACE); +// insertDefaultPrefix(prefixMap, "bd", BD.NAMESPACE); +// insertDefaultPrefix(prefixMap, "bds", BDS.NAMESPACE); + for (Map.Entry<String, String> e : defaultDecls.entrySet()) { + insertDefaultPrefix(prefixMap, e.getKey(), e.getValue()); + } - ASTUnparsedQuadDataBlock dataBlock = null; - if (qc.getOperation() instanceof ASTInsertData) { - ASTInsertData insertData = (ASTInsertData)qc.getOperation(); - dataBlock = insertData.jjtGetChild(ASTUnparsedQuadDataBlock.class); + ASTUnparsedQuadDataBlock dataBlock = null; + if (qc.getOperation() instanceof ASTInsertData) { + ASTInsertData insertData = (ASTInsertData)qc.getOperation(); + dataBlock = insertData.jjtGetChild(ASTUnparsedQuadDataBlock.class); - } - else if (qc.getOperation() instanceof ASTDeleteData) { - ASTDeleteData deleteData = (ASTDeleteData)qc.getOperation(); - dataBlock = deleteData.jjtGetChild(ASTUnparsedQuadDataBlock.class); - } + } + else if (qc.getOperation() instanceof ASTDeleteData) { + ASTDeleteData deleteData = (ASTDeleteData)qc.getOperation(); + dataBlock = deleteData.jjtGetChild(ASTUnparsedQuadDataBlock.class); + } - if (dataBlock != null) { - String prefixes = createPrefixesInSPARQLFormat(prefixMap); - // TODO optimize string concat? - dataBlock.setDataBlock(prefixes + dataBlock.getDataBlock()); - } - else { + if (dataBlock != null) { + String prefixes = createPrefixesInSPARQLFormat(prefixMap); + // TODO optimize string concat? 
+ dataBlock.setDataBlock(prefixes + dataBlock.getDataBlock()); + } + else { QNameProcessor visitor = new QNameProcessor(prefixMap); try { qc.jjtAccept(visitor, null); @@ -113,30 +135,30 @@ catch (VisitorException e) { throw new MalformedQueryException(e); } - } + } return prefixMap; } - private static void insertDefaultPrefix(Map<String, String> prefixMap, String prefix, String namespace) { - if (!prefixMap.containsKey(prefix) && !prefixMap.containsValue(namespace)) { - prefixMap.put(prefix, namespace); - } - } + private static void insertDefaultPrefix(Map<String, String> prefixMap, String prefix, String namespace) { + if (!prefixMap.containsKey(prefix) && !prefixMap.containsValue(namespace)) { + prefixMap.put(prefix, namespace); + } + } - private static String createPrefixesInSPARQLFormat(Map<String, String> prefixMap) { - StringBuilder sb = new StringBuilder(); - for (Entry<String, String> entry : prefixMap.entrySet()) { - sb.append("PREFIX"); - final String prefix = entry.getKey(); - if (prefix != null) { - sb.append(" " + prefix); - } - sb.append(":"); - sb.append(" <" + entry.getValue() + "> \n"); - } - return sb.toString(); - } + private static String createPrefixesInSPARQLFormat(Map<String, String> prefixMap) { + StringBuilder sb = new StringBuilder(); + for (Entry<String, String> entry : prefixMap.entrySet()) { + sb.append("PREFIX"); + final String prefix = entry.getKey(); + if (prefix != null) { + sb.append(" " + prefix); + } + sb.append(":"); + sb.append(" <" + entry.getValue() + "> \n"); + } + return sb.toString(); + } private static class QNameProcessor extends ASTVisitorBase { @@ -147,48 +169,48 @@ } @Override - public Object visit(ASTQName qnameNode, Object data) + public Object visit(ASTQName qnameNode, Object data) throws VisitorException { - String qname = qnameNode.getValue(); + String qname = qnameNode.getValue(); - int colonIdx = qname.indexOf(':'); + int colonIdx = qname.indexOf(':'); assert colonIdx >= 0 : "colonIdx should be >= 0: " + colonIdx; - String prefix = qname.substring(0, colonIdx); - String localName = qname.substring(colonIdx + 1); + String prefix = qname.substring(0, colonIdx); + String localName = qname.substring(colonIdx + 1); String namespace = prefixMap.get(prefix); if (namespace == null) { - throw new VisitorException("QName '" + qname + "' uses an undefined prefix"); + throw new VisitorException("QName '" + qname + "' uses an undefined prefix"); } - localName = processEscapesAndHex(localName); + localName = processEscapesAndHex(localName); // Replace the qname node with a new IRI node in the parent node - ASTIRI iriNode = new ASTIRI(SyntaxTreeBuilderTreeConstants.JJTIRI); + ASTIRI iriNode = new ASTIRI(SyntaxTreeBuilderTreeConstants.JJTIRI); iriNode.setValue(namespace + localName); qnameNode.jjtReplaceWith(iriNode); return null; } - private String processEscapesAndHex(String localName) { + private String processEscapesAndHex(String localName) { // first process hex-encoded chars. 
- StringBuffer unencoded = new StringBuffer(); - Pattern hexPattern = Pattern.compile("([^\\\\]|^)(%[A-F\\d][A-F\\d])", Pattern.CASE_INSENSITIVE); - Matcher m = hexPattern.matcher(localName); + StringBuffer unencoded = new StringBuffer(); + Pattern hexPattern = Pattern.compile("([^\\\\]|^)(%[A-F\\d][A-F\\d])", Pattern.CASE_INSENSITIVE); + Matcher m = hexPattern.matcher(localName); boolean result = m.find(); while (result) { - // we match the previous char because we need to be sure we are not - // processing an escaped % char rather than + // we match the previous char because we need to be sure we are not + // processing an escaped % char rather than // an actual hex encoding, for example: 'foo\%bar'. - String previousChar = m.group(1); - String encoded = m.group(2); + String previousChar = m.group(1); + String encoded = m.group(2); - int codePoint = Integer.parseInt(encoded.substring(1), 16); - String decoded = String.valueOf(Character.toChars(codePoint)); + int codePoint = Integer.parseInt(encoded.substring(1), 16); + String decoded = String.valueOf(Character.toChars(codePoint)); m.appendReplacement(unencoded, previousChar + decoded); result = m.find(); @@ -196,22 +218,22 @@ m.appendTail(unencoded); // then process escaped special chars. - StringBuffer unescaped = new StringBuffer(); - Pattern escapedCharPattern = Pattern.compile("\\\\[_~\\.\\-!\\$\\&\\'\\(\\)\\*\\+\\,\\;\\=\\:\\/\\?#\\@\\%]"); - m = escapedCharPattern.matcher(unencoded.toString()); - result = m.find(); + StringBuffer unescaped = new StringBuffer(); + Pattern escapedCharPattern = Pattern.compile("\\\\[_~\\.\\-!\\$\\&\\'\\(\\)\\*\\+\\,\\;\\=\\:\\/\\?#\\@\\%]"); + m = escapedCharPattern.matcher(unencoded.toString()); + result = m.find(); while (result) { - String escaped = m.group(); + String escaped = m.group(); m.appendReplacement(unescaped, escaped.substring(1)); result = m.find(); } m.appendTail(unescaped); - return unescaped.toString(); + return unescaped.toString(); } @Override - public Object visit(ASTServiceGraphPattern node, Object data) + public Object visit(ASTServiceGraphPattern node, Object data) throws VisitorException { node.setPrefixDeclarations(prefixMap); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
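The PrefixDeclProcessor change above is the source of the defaultDecls map used throughout the r8638 test changes: the hard-wired insertDefaultPrefix() calls become entries in a shared static map (now also covering foaf and dc) that both the processor and the test suites read. A self-contained sketch of the resulting semantics, using literal namespace strings instead of the openrdf vocabulary constants so it compiles on its own; DefaultPrefixSketch and its names are illustrative stand-ins, not bigdata code:

import java.util.LinkedHashMap;
import java.util.Map;

public class DefaultPrefixSketch {

    // Stand-in for PrefixDeclProcessor.defaultDecls.
    static final Map<String, String> DEFAULTS = new LinkedHashMap<String, String>();
    static {
        DEFAULTS.put("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#");
        DEFAULTS.put("foaf", "http://xmlns.com/foaf/0.1/");
    }

    static Map<String, String> process(final Map<String, String> declared) {
        final Map<String, String> prefixMap =
                new LinkedHashMap<String, String>(declared);
        for (Map.Entry<String, String> e : DEFAULTS.entrySet()) {
            // Mirrors insertDefaultPrefix(): a default is only added when
            // the prefix is unbound and the namespace is not already bound
            // to some other prefix.
            if (!prefixMap.containsKey(e.getKey())
                    && !prefixMap.containsValue(e.getValue())) {
                prefixMap.put(e.getKey(), e.getValue());
            }
        }
        return prefixMap;
    }

    public static void main(String[] args) {
        final Map<String, String> declared = new LinkedHashMap<String, String>();
        declared.put("foaf", "http://example.org/my-foaf#"); // user override wins
        System.out.println(process(declared));
        // {foaf=http://example.org/my-foaf#, rdf=http://www.w3.org/1999/02/22-rdf-syntax-ns#}
    }
}

The skip rules are what make the refactor behavior-preserving: a query that redeclares a default prefix, or binds the same namespace under another name, sees exactly what it declared.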
From: <mrp...@us...> - 2014-09-02 16:01:56
|
Revision: 8636 http://sourceforge.net/p/bigdata/code/8636 Author: mrpersonick Date: 2014-09-02 16:01:51 +0000 (Tue, 02 Sep 2014) Log Message: ----------- Bringing 2.7 branch up to date from 1.3 branch (HEAD = 8635). Modified Paths: -------------- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/PipelineOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/fed/DelegateIndexManager.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HashIndexOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HashJoinOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/HTreeDistinctBindingSetsOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractBTree.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractNode.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/BTree.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/Checkpoint.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IIndex.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ILocalBTreeView.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ReadCommittedView.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/data/IAbstractNodeData.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/data/ILeafData.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/proc/IIndexProcedure.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/view/FusedView.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/cache/ConcurrentWeakValueCache.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/htree/AbstractHTree.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/htree/HTree.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/io/writecache/IBufferedWriter.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/io/writecache/WriteCacheCounters.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/AbstractTask.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/ConcurrencyManager.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/DumpJournal.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/IBTreeManager.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/IIndexManager.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/IIndexStore.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/IJournal.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/IResourceManager.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/ITask.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/IndexProcedureTask.java 
branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/Journal.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/TemporaryStore.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/resources/IndexManager.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/resources/ResourceEvents.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/resources/StoreManager.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/service/MetadataService.java branches/SESAME_2_7/bigdata/src/test/com/bigdata/bop/join/TestJVMHashJoinOp.java branches/SESAME_2_7/bigdata/src/test/com/bigdata/bop/solutions/TestHTreeDistinctBindingSets.java branches/SESAME_2_7/bigdata/src/test/com/bigdata/bop/solutions/TestMemorySortOp.java branches/SESAME_2_7/bigdata/src/test/com/bigdata/journal/StressTestUnisolatedReadWriteIndex.java branches/SESAME_2_7/bigdata/src/test/com/bigdata/journal/TestJournalBasics.java branches/SESAME_2_7/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphEmbedded.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphFactory.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphQuery.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataPredicate.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataVertex.java branches/SESAME_2_7/bigdata-blueprints/src/test/com/bigdata/blueprints/AbstractTestBigdataGraph.java branches/SESAME_2_7/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/DTE.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSD.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/AbstractIV.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/URIExtensionIV.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/DefaultOptimizerList.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/graph/impl/bd/TestBFS.java 
branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/internal/HashCollisionUtility.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestDTE.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataValueReplacer.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEmbeddedFederationWithQuads.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithSids.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java branches/SESAME_2_7/bigdata-war/src/WEB-INF/GraphStore.properties branches/SESAME_2_7/bigdata-war/src/html/css/style.css branches/SESAME_2_7/bigdata-war/src/html/index.html branches/SESAME_2_7/bigdata-war/src/html/js/workbench.js Added Paths: ----------- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/ISingleThreadedOp.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/IGISTLocalManager.java branches/SESAME_2_7/bigdata/src/java/com/bigdata/journal/IGISTManager.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphlet.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataQueryProjection.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataSelection.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BlueprintsValueFactory.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/DefaultBlueprintsValueFactory.java branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/ImmortalGraph.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/IInlineURIFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineIPv4URIHandler.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIHandler.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineUUIDURIHandler.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/MultipurposeIDHandler.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/NoInlineURIFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPv4AddrIV.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/vocab/BaseVocabularyDecl.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/vocab/DefaultBigdataVocabulary.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket-765.rq branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket-765.srx branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket-765.trig branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/TestInlineURIs.java branches/SESAME_2_7/bigdata-war/src/html/js/vendor/cm-addons/matchbrackets.js Removed Paths: ------------- branches/SESAME_2_7/bigdata-blueprints/src/java/com/bigdata/blueprints/BlueprintsRDFFactory.java 
branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java Copied: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/ISingleThreadedOp.java (from rev 8635, branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ISingleThreadedOp.java) =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/ISingleThreadedOp.java (rev 0) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/ISingleThreadedOp.java 2014-09-02 16:01:51 UTC (rev 8636) @@ -0,0 +1,40 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Aug 26, 2010 + */ +package com.bigdata.bop; + +/** + * Marker interface for an operator whose instances do not support concurrent + * execution. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see PipelineOp.Annotations#MAX_PARALLEL + */ +public interface ISingleThreadedOp { + +} Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/PipelineOp.java =================================================================== --- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2014-09-02 15:01:44 UTC (rev 8635) +++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2014-09-02 16:01:51 UTC (rev 8636) @@ -122,6 +122,8 @@ * {@link #MAX_MESSAGES_PER_TASK} and {@link #PIPELINE_QUEUE_CAPACITY} * have less effect and performance tends to be best around a modest * value (10) for those annotations. + * + * @see ISingleThreadedOp */ String MAX_PARALLEL = PipelineOp.class.getName() + ".maxParallel"; @@ -505,17 +507,49 @@ // // } - /** - * The maximum parallelism with which tasks may be evaluated for this - * operator (this is a per-shard limit in scale-out). A value of ONE (1) - * indicates that at most ONE (1) instance of this task may be executing in - * parallel for a given shard and may be used to indicate that the operator - * evaluation task is not thread-safe. - * - * @see Annotations#MAX_PARALLEL - */ + /** + * If parallel evaluation is not allowed, then throws + * {@link IllegalArgumentException}. + */ + final protected void assertMaxParallelOne() { + + /* + * Note: Tests the annotation, not getMaxParallel(), since we want to + * make sure the annotation is valid and getMaxParallel() also tests for + * the ISingleThreadedOp interface. + */ + if (getProperty(PipelineOp.Annotations.MAX_PARALLEL, + PipelineOp.Annotations.DEFAULT_MAX_PARALLEL) != 1) { + + throw new IllegalArgumentException( + PipelineOp.Annotations.MAX_PARALLEL + "=" + + getMaxParallel()); + + } + + } + + /** + * The maximum parallelism with which tasks may be evaluated for this + * operator (this is a per-shard limit in scale-out). 
Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -40,6 +40,7 @@
 import com.bigdata.bop.BOpUtility;
 import com.bigdata.bop.IBindingSet;
 import com.bigdata.bop.IQueryAttributes;
+import com.bigdata.bop.ISingleThreadedOp;
 import com.bigdata.bop.IVariable;
 import com.bigdata.bop.NV;
 import com.bigdata.bop.PipelineOp;
@@ -73,7 +74,8 @@
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
  */
-public class HTreeNamedSubqueryOp extends PipelineOp implements INamedSubqueryOp {
+public class HTreeNamedSubqueryOp extends PipelineOp implements
+        INamedSubqueryOp, ISingleThreadedOp {
 
     static private final transient Logger log = Logger
             .getLogger(HTreeNamedSubqueryOp.class);
@@ -123,11 +125,7 @@
                     + getEvaluationContext());
         }
 
-        if (getMaxParallel() != 1) {
-            throw new IllegalArgumentException(
-                    PipelineOp.Annotations.MAX_PARALLEL + "="
-                            + getMaxParallel());
-        }
+        assertMaxParallelOne();
 
         if (!isAtOnceEvaluation())
             throw new IllegalArgumentException();

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -40,6 +40,7 @@
 import com.bigdata.bop.BOpUtility;
 import com.bigdata.bop.IBindingSet;
 import com.bigdata.bop.IQueryAttributes;
+import com.bigdata.bop.ISingleThreadedOp;
 import com.bigdata.bop.IVariable;
 import com.bigdata.bop.NV;
 import com.bigdata.bop.PipelineOp;
@@ -73,7 +74,8 @@
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
  */
-public class JVMNamedSubqueryOp extends PipelineOp implements INamedSubqueryOp {
+public class JVMNamedSubqueryOp extends PipelineOp implements INamedSubqueryOp,
+        ISingleThreadedOp {
 
     static private final transient Logger log = Logger
             .getLogger(JVMNamedSubqueryOp.class);
@@ -112,11 +114,7 @@
                     + getEvaluationContext());
         }
 
-        if (getMaxParallel() != 1) {
-            throw new IllegalArgumentException(
-                    PipelineOp.Annotations.MAX_PARALLEL + "="
-                            + getMaxParallel());
-        }
+        assertMaxParallelOne();
 
         if (!isAtOnceEvaluation())
             throw new IllegalArgumentException();

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/fed/DelegateIndexManager.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/fed/DelegateIndexManager.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/fed/DelegateIndexManager.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -1,3 +1,27 @@
+/*
+
+Copyright (C) SYSTAP, LLC 2006-2008.  All rights reserved.
+
+Contact:
+     SYSTAP, LLC
+     4501 Tower Road
+     Greensboro, NC 27410
+     lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+*/
 package com.bigdata.bop.fed;
 
 import java.util.Iterator;
@@ -35,8 +59,6 @@
  *       how to create the index partition view.
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id: JoinTaskFactoryTask.java 3448 2010-08-18 20:55:58Z thompsonbry
- *          $
  * 
  * @todo While this class solves our problem I do not know whether or not this
  *       class should this class have more visibility? The downside is that it
@@ -64,7 +86,10 @@
 
     /**
      * Delegates to the {@link IndexManager}.
+     * <p>
+     * {@inheritDoc}
      */
+    @Override
     public IIndex getIndex(final String name, final long timestamp) {
 
         return dataService.getResourceManager().getIndex(name, timestamp);
@@ -73,7 +98,10 @@
 
     /**
      * Not allowed.
+     * <p>
+     * {@inheritDoc}
      */
+    @Override
     public void dropIndex(final String name) {
 
         throw new UnsupportedOperationException();
@@ -83,79 +111,93 @@
 
     /**
      * Not allowed.
      */
+    @Override
     public void registerIndex(IndexMetadata indexMetadata) {
 
         throw new UnsupportedOperationException();
 
     }
 
+    @Override
     public void destroy() {
 
         throw new UnsupportedOperationException();
 
     }
 
+    @Override
     public ExecutorService getExecutorService() {
 
         return dataService.getFederation().getExecutorService();
 
     }
 
+    @Override
     public BigdataFileSystem getGlobalFileSystem() {
 
         return dataService.getFederation().getGlobalFileSystem();
 
     }
 
+    @Override
     public SparseRowStore getGlobalRowStore() {
 
         return dataService.getFederation().getGlobalRowStore();
 
     }
 
+    @Override
     public SparseRowStore getGlobalRowStore(final long timestamp) {
 
         return dataService.getFederation().getGlobalRowStore(timestamp);
 
     }
 
+    @Override
     public long getLastCommitTime() {
 
         return dataService.getFederation().getLastCommitTime();
 
     }
 
+    @Override
     public IResourceLocator getResourceLocator() {
 
         return dataService.getFederation().getResourceLocator();
 
     }
-    
+
+    @Override
     public IResourceLockService getResourceLockService() {
 
         return dataService.getFederation().getResourceLockService();
 
     }
 
+    @Override
     public TemporaryStore getTempStore() {
 
         return dataService.getFederation().getTempStore();
 
     }
 
+    @Override
     public ScheduledFuture<?> addScheduledTask(Runnable task,
             long initialDelay, long delay, TimeUnit unit) {
 
         return dataService.getFederation().addScheduledTask(task,
                 initialDelay, delay, unit);
 
     }
 
+    @Override
     public boolean getCollectPlatformStatistics() {
 
         return dataService.getFederation().getCollectPlatformStatistics();
 
     }
 
+    @Override
     public boolean getCollectQueueStatistics() {
 
         return dataService.getFederation().getCollectQueueStatistics();
 
     }
 
+    @Override
     public int getHttpdPort() {
 
         return dataService.getFederation().getHttpdPort();
 
     }
@@ -171,6 +213,7 @@
         return dataService.getFederation().getCounters();
 
     }
 
+    @Override
     public String toString() {
 
         return super.toString() + "{dataServiceUUID="

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinOp.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinOp.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinOp.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -33,6 +33,7 @@
 import com.bigdata.bop.BOpContext;
 import com.bigdata.bop.IBindingSet;
 import com.bigdata.bop.IPredicate;
+import com.bigdata.bop.ISingleThreadedOp;
 import com.bigdata.bop.NV;
 import com.bigdata.bop.PipelineOp;
 import com.bigdata.bop.controller.INamedSolutionSetRef;
@@ -94,9 +95,9 @@
  * @see HTreeHashJoinUtility
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
  */
-public class HTreeHashJoinOp<E> extends HashJoinOp<E> {
+public class HTreeHashJoinOp<E> extends HashJoinOp<E> implements
+        ISingleThreadedOp {
 
     /**
      * 
@@ -117,7 +118,7 @@
 
     }
 
-    public HTreeHashJoinOp(final BOp[] args, NV... annotations) {
+    public HTreeHashJoinOp(final BOp[] args, final NV... annotations) {
 
         this(args, NV.asMap(annotations));
 
@@ -132,9 +133,7 @@
 
         super(args, annotations);
 
-        if (getMaxParallel() != 1)
-            throw new UnsupportedOperationException(Annotations.MAX_PARALLEL
-                    + "=" + getMaxParallel());
+        assertMaxParallelOne();
 
         // Note: This is no longer true. It is now shared via the IQueryAttributes.
//        // shared state is used to share the hash table.

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HashIndexOp.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HashIndexOp.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HashIndexOp.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -38,6 +38,7 @@
 import com.bigdata.bop.BOpUtility;
 import com.bigdata.bop.IBindingSet;
 import com.bigdata.bop.IQueryAttributes;
+import com.bigdata.bop.ISingleThreadedOp;
 import com.bigdata.bop.IVariable;
 import com.bigdata.bop.NV;
 import com.bigdata.bop.PipelineOp;
@@ -77,7 +78,7 @@
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
  */
-abstract public class HashIndexOp extends PipelineOp {
+abstract public class HashIndexOp extends PipelineOp implements ISingleThreadedOp {
 
//    static private final transient Logger log = Logger
//            .getLogger(HashIndexOp.class);
@@ -150,12 +151,18 @@
                     BOp.Annotations.EVALUATION_CONTEXT + "="
                             + getEvaluationContext());
         }
 
-        if (getEvaluationContext() != BOpEvaluationContext.CONTROLLER) {
-            throw new IllegalArgumentException(
-                    BOp.Annotations.EVALUATION_CONTEXT + "="
-                            + getEvaluationContext());
-        }
+//        if (getEvaluationContext() != BOpEvaluationContext.CONTROLLER) {
+//            throw new IllegalArgumentException(
+//                    BOp.Annotations.EVALUATION_CONTEXT + "="
+//                            + getEvaluationContext());
+//        }
 
+        /*
+         * This operator writes on an object that is not thread-safe for
+         * mutation.
+         */
+        assertMaxParallelOne();
+
         if (!isLastPassRequested()) {
             /*
              * Last pass evaluation must be requested. This operator will not

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HashJoinOp.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HashJoinOp.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/HashJoinOp.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -65,7 +65,6 @@
  *            which join are output.
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
  */
 abstract public class HashJoinOp<E> extends PipelineOp implements
         IShardwisePipelineOp<E> {

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinOp.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinOp.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinOp.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -34,6 +34,7 @@
 import com.bigdata.bop.HashMapAnnotations;
 import com.bigdata.bop.IBindingSet;
 import com.bigdata.bop.IPredicate;
+import com.bigdata.bop.ISingleThreadedOp;
 import com.bigdata.bop.NV;
 import com.bigdata.bop.controller.INamedSolutionSetRef;
 import com.bigdata.relation.accesspath.IAccessPath;
@@ -56,9 +57,8 @@
  * @see JVMHashJoinUtility
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
  */
-public class JVMHashJoinOp<E> extends HashJoinOp<E> {
+public class JVMHashJoinOp<E> extends HashJoinOp<E> implements ISingleThreadedOp {
 
     /**
      * 
@@ -94,9 +94,7 @@
 
         super(args, annotations);
 
-        if (getMaxParallel() != 1)
-            throw new UnsupportedOperationException(Annotations.MAX_PARALLEL
-                    + "=" + getMaxParallel());
+        assertMaxParallelOne();
 
         assertAtOnceJavaHeapOp();

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/HTreeDistinctBindingSetsOp.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/HTreeDistinctBindingSetsOp.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/HTreeDistinctBindingSetsOp.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -9,6 +9,7 @@
 import com.bigdata.bop.HTreeAnnotations;
 import com.bigdata.bop.IBindingSet;
 import com.bigdata.bop.IQueryAttributes;
+import com.bigdata.bop.ISingleThreadedOp;
 import com.bigdata.bop.IVariable;
 import com.bigdata.bop.NV;
 import com.bigdata.bop.PipelineOp;
@@ -42,10 +43,9 @@
  *          on the native heap and eventually the machine will begin to swap.
 * 
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id: DistinctElementFilter.java 3466 2010-08-27 14:28:04Z
- *          thompsonbry $
 */
-public class HTreeDistinctBindingSetsOp extends PipelineOp {
+public class HTreeDistinctBindingSetsOp extends PipelineOp implements
+        ISingleThreadedOp {
 
//    private final static transient Logger log = Logger
//            .getLogger(DistinctBindingSetsWithHTreeOp.class);
@@ -96,9 +96,7 @@
                             + getEvaluationContext());
         }
 
-        if (getMaxParallel() != 1)
-            throw new UnsupportedOperationException(Annotations.MAX_PARALLEL
-                    + "=" + getMaxParallel());
+        assertMaxParallelOne();
 
//        // shared state is used to share the hash table.
//        if (!isSharedState()) {

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -34,7 +34,8 @@
  * <p>
  * Computing the value expressions first is not only an efficiency, but is also
  * required in order to detect type errors. When a type error is detected for a
- * value expression the corresponding input solution is dropped. Since the
+ * value expression the corresponding input solution is kept but with no new
+ * bindings, see trac-765. Since the
  * computed value expressions must become bound on the solutions to be sorted,
  * the caller is responsible for wrapping any value expression more complex than
  * a variable or a constant with an {@link IBind} onto an anonymous variable.
@@ -290,14 +291,9 @@
 
                 } catch (SparqlTypeErrorException ex) {
 
-                    // drop solution with type error.
+                    // log type error, do not drop solution (see trac 765).
                     TypeErrorLog.handleTypeError(ex, expr, stats);
 
-//                    if (log.isInfoEnabled())
-//                        log.info("Dropping solution due to type error: "
-//                                + bset);
-
-                    continue;
-
                 }
 
                 // add to the set of solutions to be sorted.
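
The MemorySortOp hunks change the handling of type errors raised while computing sort keys: the affected solution is now kept (with no new binding) rather than dropped, and the error is only counted via TypeErrorLog. A self-contained sketch of that control flow, with bigdata's binding-set and value-expression types replaced by hypothetical stand-ins:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;

    /**
     * Simplified model of the loop patched above: a type error while
     * computing a sort key no longer removes the solution from the output.
     */
    public class KeepSolutionOnTypeError {

        /** Stand-in for SparqlTypeErrorException. */
        static class TypeError extends RuntimeException {
            private static final long serialVersionUID = 1L;
        }

        static List<Map<String, Object>> bindSortKeys(
                final List<Map<String, Object>> solutions,
                final Function<Map<String, Object>, Object> expr,
                final String var) {

            final List<Map<String, Object>> out = new ArrayList<>();

            for (final Map<String, Object> bset : solutions) {
                try {
                    // Bind the computed sort key on the solution.
                    bset.put(var, expr.apply(bset));
                } catch (TypeError ex) {
                    // Old behavior: continue; (the solution was dropped).
                    // New behavior (trac-765): log the error and fall
                    // through, keeping the solution with no new binding.
                }
                out.add(bset);
            }

            return out;
        }
    }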
Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -19,6 +19,7 @@
 import com.bigdata.bop.IBindingSet;
 import com.bigdata.bop.IConstant;
 import com.bigdata.bop.IConstraint;
+import com.bigdata.bop.ISingleThreadedOp;
 import com.bigdata.bop.IValueExpression;
 import com.bigdata.bop.IVariable;
 import com.bigdata.bop.PipelineOp;
@@ -58,10 +59,9 @@
  *          the operator can still be invoked multiple times).
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id: DistinctElementFilter.java 3466 2010-08-27 14:28:04Z
- *          thompsonbry $
  */
-public class PipelinedAggregationOp extends GroupByOp {
+public class PipelinedAggregationOp extends GroupByOp implements
+        ISingleThreadedOp {
 
     private final static transient Logger log = Logger
             .getLogger(PipelinedAggregationOp.class);
@@ -136,14 +136,11 @@
                             + "=" + isLastPassRequested());
         }
 
-        if (getMaxParallel() != 1) {
-            /*
-             * Note: The operator MUST be single threaded in order to receive
-             * the isLastInvocation notice.
-             */
-            throw new UnsupportedOperationException(Annotations.MAX_PARALLEL
-                    + "=" + getMaxParallel());
-        }
+        /*
+         * Note: The operator MUST be single threaded in order to receive the
+         * isLastInvocation notice.
+         */
+        assertMaxParallelOne();
 
     }

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractBTree.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractBTree.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractBTree.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -23,6 +23,7 @@
  */
 /*
  * Created on Dec 19, 2006
+ * 
  */
 package com.bigdata.btree;
 
@@ -3623,7 +3624,24 @@
         assert node.dirty;
         assert !node.deleted;
         assert !node.isPersistent();
-        assert !node.isReadOnly(); // FIXME Occasional CI errors on this assert for TestMROWTransactions. Also StressTestUnisolatedReadWriteIndex. See http://trac.bigdata.com/ticket/343
+        /**
+         * Occasional CI errors on this assert have been observed for
+         * StressTestUnisolatedReadWriteIndex. This has been traced to a test
+         * error. The test was interrupting the tasks, but the tasks were not
+         * being cancelled simultaneously. This meant that one task could be
+         * interrupted during an eviction from the write retention queue and
+         * that another task could obtain the UnisolatedReadWriteIndex lock and
+         * then hit the error since the BTree, the write retention queue, and
+         * the nodes that were being evicted would be in an inconsistent state.
+         * The test does not fail if it is run to completion (no timeout).
+         * 
+         * @see <a href="http://trac.bigdata.com/ticket/343" >Stochastic assert
+         *      in AbstractBTree#writeNodeOrLeaf() in CI </a>
+         * 
+         *      TestMROWTransactions might also demonstrate an issue
+         *      occasionally. If so, then check for the same root cause.
+         */
+        assert !node.isReadOnly();
         assertNotReadOnly();
 
         /*

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractNode.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractNode.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/AbstractNode.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -23,6 +23,7 @@
  */
 /*
  * Created on Nov 15, 2006
+ * 
  */
 package com.bigdata.btree;

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/BTree.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/BTree.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/BTree.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -23,6 +23,7 @@
  */
 /*
  * Created on Nov 15, 2006
+ * 
 */
 package com.bigdata.btree;
 
@@ -155,7 +156,6 @@
  * several published papers.
  * 
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
 */
 public class BTree extends AbstractBTree implements //ICommitter,
         ICheckpointProtocol {// ILocalBTreeView {
@@ -167,18 +167,21 @@
 
     }
 
+    @Override
     final public long getNodeCount() {
 
         return nnodes;
 
     }
 
+    @Override
     final public long getLeafCount() {
 
         return nleaves;
 
     }
 
+    @Override
     final public long getEntryCount() {
 
         return nentries;
@@ -201,6 +204,7 @@
      * counter will assign values within a namespace defined by the partition
      * identifier.
      */
+    @Override
     public ICounter getCounter() {
 
         ICounter counter = new Counter(this);
@@ -628,12 +632,14 @@
//    }
//    final private boolean readOnly;
 
+    @Override
     final public long getLastCommitTime() {
 
         return lastCommitTime;
 
     }
 
+    @Override
     final public long getRevisionTimestamp() {
 
         if (readOnly)
@@ -643,6 +649,7 @@
 
     }
 
+    @Override
     final public void setLastCommitTime(final long lastCommitTime) {
 
         if (lastCommitTime == 0L)
@@ -685,6 +692,7 @@
     /**
      * Return the {@link IDirtyListener}.
      */
+    @Override
     final public IDirtyListener getDirtyListener() {
 
         return listener;
@@ -696,6 +704,7 @@
      * 
      * @param listener The listener.
      */
+    @Override
     final public void setDirtyListener(final IDirtyListener listener) {
 
         assertNotReadOnly();
@@ -847,6 +856,7 @@
      * 
      * @see #load(IRawStore, long, boolean)
      */
+    @Override
     final public long writeCheckpoint() {
 
         // write checkpoint and return address of that checkpoint record.
@@ -859,6 +869,7 @@
      * 
      * @see #load(IRawStore, long, boolean)
      */
+    @Override
     final public Checkpoint writeCheckpoint2() {
 
         assertNotTransient();
@@ -889,8 +900,10 @@
          * @see https://sourceforge.net/apps/trac/bigdata/ticket/343
          * @see https://sourceforge.net/apps/trac/bigdata/ticket/440
          */
-        final Lock lock = new UnisolatedReadWriteIndex(this).writeLock();
-        try {
+//        final Lock lock = new UnisolatedReadWriteIndex(this).writeLock();
+        final Lock lock = UnisolatedReadWriteIndex.getReadWriteLock(this).writeLock();
+        lock.lock();
+        try {
 
             if (/* autoCommit && */needsCheckpoint()) {
@@ -1084,6 +1097,7 @@
 
     }
 
+    @Override
     final public Checkpoint getCheckpoint() {
 
         if (checkpoint == null)
@@ -1093,18 +1107,21 @@
 
     }
 
+    @Override
     final public long getRecordVersion() {
 
         return recordVersion;
 
     }
 
+    @Override
     final public long getMetadataAddr() {
 
         return metadata.getMetadataAddr();
 
     }
 
+    @Override
     final public long getRootAddr() {
 
         return (root == null ? getCheckpoint().getRootAddr() : root
@@ -1293,9 +1310,10 @@
      * @return The address of a {@link Checkpoint} record from which the btree
      *         may be reloaded.
      */
+    @Override
     public long handleCommit(final long commitTime) {
 
         return writeCheckpoint2().getCheckpointAddr();
 
     }
@@ -1316,6 +1334,7 @@
      *       and dropping indices vs removing the entries in an individual
      *       {@link BTree}.
      */
+    @Override
     final public void removeAll() {
 
         assertNotReadOnly();
@@ -1892,6 +1911,7 @@
         private NodeFactory() {
         }
 
+        @Override
         public Leaf allocLeaf(final AbstractBTree btree, final long addr,
                 final ILeafData data) {
 
@@ -1899,6 +1919,7 @@
 
         }
 
+        @Override
         public Node allocNode(final AbstractBTree btree, final long addr,
                 final INodeData data) {
 
@@ -1926,12 +1947,14 @@
 
         }
 
+        @Override
         public long get() {
 
             return btree.counter.get();
 
         }
 
+        @Override
         public long incrementAndGet() {
 
             final long counter = btree.counter.incrementAndGet();
@@ -2035,12 +2058,14 @@
 
         }
 
+        @Override
         public long get() {
 
             return wrap( src.get() );
 
         }
 
+        @Override
         public long incrementAndGet() {
 
             return wrap(src.incrementAndGet());
@@ -2098,6 +2123,7 @@
     /**
      * Returns ONE (1).
      */
+    @Override
     final public int getSourceCount() {
 
         return 1;
@@ -2107,24 +2133,28 @@
     /**
      * An array containing this {@link BTree}.
      */
+    @Override
     final public AbstractBTree[] getSources() {
 
         return new AbstractBTree[]{this};
 
     }
 
+    @Override
     final public BTree getMutableBTree() {
 
         return this;
 
     }
 
+    @Override
     public LeafCursor newLeafCursor(final SeekEnum where) {
 
         return new LeafCursor(where);
 
     }
 
+    @Override
     public LeafCursor newLeafCursor(final byte[] key) {
 
         return new LeafCursor(key);
@@ -2400,18 +2430,21 @@
         */
        private Leaf leaf;
 
+        @Override
        public Leaf leaf() {
 
            return leaf;
 
        }
 
+        @Override
        public BTree getBTree() {
 
            return BTree.this;
 
        }
 
+        @Override
        public LeafCursor clone() {
 
            return new LeafCursor(this);
@@ -2464,6 +2497,7 @@
 
        }
 
+        @Override
        public Leaf first() {
 
            stack.clear();
@@ -2485,6 +2519,7 @@
 
        }
 
+        @Override
        public Leaf last() {
 
            stack.clear();
@@ -2511,6 +2546,7 @@
         * the leaf may not actually contain the key, in which case it is the
         * leaf that contains the insertion point for the key.
         */
+        @Override
        public Leaf seek(final byte[] key) {
 
            stack.clear();
@@ -2533,6 +2569,7 @@
 
        }
 
+        @Override
        public Leaf seek(final ILeafCursor<Leaf> src) {
 
            if (src == null)
@@ -2558,6 +2595,7 @@
 
        }
 
+        @Override
        public Leaf next() {
 
            // make sure that the current leaf is valid.
@@ -2658,6 +2696,7 @@
         * @return The prior leaf -or- <code>null</code> if there is no
         *         predecessor of this leaf.
         */
+        @Override
        public Leaf prior() {
 
            // make sure that the current leaf is valid.
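
The writeCheckpoint2() hunk above stops constructing a throw-away UnisolatedReadWriteIndex just to obtain a write lock and instead asks UnisolatedReadWriteIndex.getReadWriteLock(this) for the canonical ReentrantReadWriteLock, so checkpointing and ordinary index access contend on the same lock instance. A standalone sketch of that canonicalizing pattern; the LockManager class and the plain Object key are simplifications of the real code, which keys a WeakHashMap on ICommitter (see the UnisolatedReadWriteIndex hunks further below):

    import java.util.Map;
    import java.util.WeakHashMap;
    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    /**
     * Canonicalizing lock factory: every caller asking about the same index
     * object receives the same ReentrantReadWriteLock instance.
     */
    public class LockManager {

        // WeakHashMap: the entry is cleared once the index is unreachable.
        private static final Map<Object, ReentrantReadWriteLock> locks =
                new WeakHashMap<>();

        public static ReentrantReadWriteLock getReadWriteLock(final Object index) {
            if (index == null)
                throw new IllegalArgumentException();
            synchronized (locks) {
                ReentrantReadWriteLock lock = locks.get(index);
                if (lock == null) {
                    lock = new ReentrantReadWriteLock();
                    locks.put(index, lock);
                }
                return lock;
            }
        }

        /** Usage mirroring the patched writeCheckpoint2(). */
        public static void withWriteLock(final Object index, final Runnable body) {
            final Lock lock = getReadWriteLock(index).writeLock();
            lock.lock();
            try {
                body.run();
            } finally {
                lock.unlock();
            }
        }
    }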
Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/Checkpoint.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/Checkpoint.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/Checkpoint.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -49,7 +49,6 @@
  * you can start using the index.
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
  */
 public class Checkpoint implements ICheckpoint, Externalizable {
 
@@ -782,13 +781,14 @@
     }
 
     /**
-     * Create a persistence capable data structure.
+     * Generic method to create a persistence capable data structure (GIST
+     * compatible, core implementation).
      * 
      * @param store
      *            The backing store.
      * @param metadata
     *            The metadata that describes the data structure to be created.
-     * 
+     *
      * @return The persistence capable data structure.
      */
     public static ICheckpointProtocol create(final IRawStore store,

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/DefaultEvictionListener.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -23,6 +23,7 @@
  */
 /*
  * Created on Nov 17, 2006
+ * 
  */
 package com.bigdata.btree;
 
@@ -33,11 +34,11 @@
  * persistence store.
  * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
  */
 public class DefaultEvictionListener implements
         IEvictionListener {
 
+    @Override
     public void evicted(final IHardReferenceQueue<PO> cache, final PO ref) {
 
         final AbstractNode<?> node = (AbstractNode<?>) ref;

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IIndex.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IIndex.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/IIndex.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -43,7 +43,6 @@
  * </p>
 * 
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
 */
 public interface IIndex extends ISimpleBTree, IAutoboxBTree, IRangeQuery,
         IIndexLocalCounter, ICounterSetAccess {

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ILocalBTreeView.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ILocalBTreeView.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ILocalBTreeView.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -36,7 +36,6 @@
  * {@link AbstractBTree}s.
 * 
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
 */
 public interface ILocalBTreeView extends IIndex {

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ReadCommittedView.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ReadCommittedView.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/ReadCommittedView.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -76,7 +76,6 @@
  * its public API.
 * 
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
 */
 public class ReadCommittedView implements ILocalBTreeView {
 
@@ -86,7 +85,6 @@
      *       (b) the lastCommitTime on the journal is changed.
      * 
      * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
-     * @version $Id$
      */
     private static class Basis {
 
@@ -176,96 +174,112 @@
 
     }
 
+    @Override
     public ICounter getCounter() {
 
         return getIndex().getCounter();
 
     }
 
+    @Override
     public CounterSet getCounters() {
 
         return getIndex().getCounters();
 
     }
 
+    @Override
     public IndexMetadata getIndexMetadata() {
 
         return getIndex().getIndexMetadata();
 
     }
 
+    @Override
     public IResourceMetadata[] getResourceMetadata() {
 
         return getIndex().getResourceMetadata();
 
     }
 
+    @Override
     public boolean contains(byte[] key) {
 
         return getIndex().contains(key);
 
     }
 
+    @Override
     public boolean contains(Object key) {
 
         return getIndex().contains(key);
 
     }
 
+    @Override
     public byte[] lookup(byte[] key) {
 
         return getIndex().lookup(key);
 
     }
 
+    @Override
     public Object lookup(Object key) {
 
         return getIndex().lookup(key);
 
     }
 
+    @Override
     public byte[] remove(byte[] key) {
 
         throw new UnsupportedOperationException();
 
     }
 
+    @Override
     public Object remove(Object key) {
 
         throw new UnsupportedOperationException();
 
     }
 
+    @Override
     public byte[] insert(byte[] key, byte[] value) {
 
         throw new UnsupportedOperationException();
 
     }
 
+    @Override
     public Object insert(Object key, Object value) {
 
         throw new UnsupportedOperationException();
 
     }
 
+    @Override
     public long rangeCount() {
 
         return getIndex().rangeCount();
 
     }
 
+    @Override
     public long rangeCount(byte[] fromKey, byte[] toKey) {
 
         return getIndex().rangeCount(fromKey, toKey);
 
     }
 
+    @Override
     public long rangeCountExact(byte[] fromKey, byte[] toKey) {
 
         return getIndex().rangeCountExact(fromKey, toKey);
 
     }
 
+    @Override
     public long rangeCountExactWithDeleted(byte[] fromKey, byte[] toKey) {
 
         return getIndex().rangeCountExactWithDeleted(fromKey, toKey);
@@ -283,12 +297,14 @@
      * created. In order for newly committed state to be visible you must
      * request a new iterator.
      */
+    @Override
     public ITupleIterator rangeIterator() {
 
         return getIndex().rangeIterator();
 
     }
 
+    @Override
     public ITupleIterator rangeIterator(byte[] fromKey, byte[] toKey,
             int capacity, int flags, IFilter filterCtor) {
 
@@ -297,6 +313,7 @@
 
     }
 
+    @Override
     public ITupleIterator rangeIterator(byte[] fromKey, byte[] toKey) {
 
         return getIndex().rangeIterator(fromKey, toKey);
 
@@ -308,6 +325,7 @@
      * read-only contract for the procedures processed by this class.
      */
+    @Override
     public void submit(byte[] fromKey, byte[] toKey,
             IKeyRangeIndexProcedure proc, IResultHandler handler) {
 
@@ -315,12 +333,14 @@
 
     }
 
+    @Override
     public Object submit(byte[] key, ISimpleIndexProcedure proc) {
 
         return getIndex().submit(key, proc);
 
     }
 
+    @Override
     public void submit(int fromIndex, int toIndex, byte[][] keys,
             byte[][] vals, AbstractKeyArrayIndexProcedureConstructor ctor,
             IResultHandler resultHandler) {
 
@@ -329,30 +349,34 @@
 
     }
 
-    public final BTreeCounters getBTreeCounters() {
-
-        return getIndex().getBtreeCounters();
-
-    }
+//    public final BTreeCounters getBTreeCounters() {
+//
+//        return getIndex().getBtreeCounters();
+//
+//    }
 
+    @Override
     public IBloomFilter getBloomFilter() {
 
         return getIndex().getBloomFilter();
 
     }
 
+    @Override
     public BTree getMutableBTree() {
 
         return getIndex().getMutableBTree();
 
     }
 
+    @Override
     public int getSourceCount() {
 
         return getIndex().getSourceCount();
 
     }
 
+    @Override
     public AbstractBTree[] getSources() {
 
         return getIndex().getSources();

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -23,6 +23,7 @@
  */
 /*
  * Created on Jan 10, 2008
+ * 
  */
 package com.bigdata.btree;
 
@@ -118,8 +119,6 @@
  * computing the fix point of a rule set) is significantly lower.
 * 
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id: UnisolatedReadWriteIndex.java 4054 2011-01-05 13:51:25Z
- *          thompsonbry $
 */
 public class UnisolatedReadWriteIndex implements IIndex, ILinearList {
 
@@ -230,7 +229,7 @@
      * 
      * @return The acquired lock.
      */
-    private Lock lock(final IIndexProcedure proc) {
+    private Lock lock(final IIndexProcedure<?> proc) {
 
         if (proc == null)
             throw new IllegalArgumentException();
 
@@ -274,7 +273,7 @@
      * Canonicalizing mapping for the locks used to control access to the
      * unisolated index.
      */
-    static final private WeakHashMap<ICommitter, ReadWriteLock> locks = new WeakHashMap<ICommitter,ReadWriteLock>();
+    static final private WeakHashMap<ICommitter, ReentrantReadWriteLock> locks = new WeakHashMap<ICommitter,ReentrantReadWriteLock>();
 
     /**
      * The default capacity for iterator reads against the underlying index. The
@@ -364,14 +363,14 @@
      *             if the argument is <code>null</code>.
      */
     // Note: Exposed to HTree, at least for now.
-    public static ReadWriteLock getReadWriteLock(final ICommitter btree) {
+    public static ReentrantReadWriteLock getReadWriteLock(final ICommitter btree) {
 
         if (btree == null)
             throw new IllegalArgumentException();
 
         synchronized (locks) {
 
-            ReadWriteLock readWriteLock = locks.get(btree);
+            ReentrantReadWriteLock readWriteLock = locks.get(btree);
 
             if (readWriteLock == null) {
 
@@ -386,24 +385,28 @@
 
     }
 
+    @Override
     public String toString() {
 
         return getClass().getSimpleName() + "{" + ndx.toString() + "}";
 
     }
 
+    @Override
     public IndexMetadata getIndexMetadata() {
 
         return ndx.getIndexMetadata();
 
     }
 
+    @Override
     public IResourceMetadata[] getResourceMetadata() {
 
         return getIndexMetadata().getPartitionMetadata().getResources();
 
     }
 
+    @Override
     public CounterSet getCounters() {
 
         return ndx.getCounters();
 
@@ -416,12 +419,14 @@
      * 
      * @throws UnsupportedOperationException
      */
+    @Override
     public ICounter getCounter() {
 
         throw new UnsupportedOperationException();
 
     }
 
+    @Override
     public boolean contains(final Object key) {
 
         final Lock lock = readLock();
@@ -438,6 +443,7 @@
 
     }
 
+    @Override
     public Object insert(final Object key, final Object value) {
 
         final Lock lock = writeLock();
@@ -454,6 +460,7 @@
 
     }
 
+    @Override
     public Object lookup(final Object key) {
 
         final Lock lock = readLock();
@@ -470,6 +477,7 @@
 
     }
 
+    @Override
     public Object remove(final Object key) {
 
         final Lock lock = writeLock();
@@ -486,6 +494,7 @@
 
     }
 
+    @Override
     public boolean contains(final byte[] key) {
 
         final Lock lock = readLock();
@@ -502,6 +511,7 @@
 
     }
 
+    @Override
     public byte[] lookup(final byte[] key) {
 
         final Lock lock = readLock();
@@ -518,6 +528,7 @@
 
     }
 
+    @Override
     public byte[] insert(final byte[] key, final byte[] value) {
 
         final Lock lock = writeLock();
@@ -534,6 +545,7 @@
 
     }
 
+    @Override
     public byte[] remove(final byte[] key) {
 
         final Lock lock = writeLock();
@@ -550,6 +562,7 @@
 
     }
 
+    @Override
     public long rangeCount() {
 
         final Lock lock = readLock();
@@ -566,6 +579,7 @@
 
     }
 
+    @Override
     public long rangeCount(final byte[] fromKey, final byte[] toKey) {
 
         final Lock lock = readLock();
@@ -582,6 +596,7 @@
 
     }
 
+    @Override
     public long rangeCountExact(final byte[] fromKey, final byte[] toKey) {
 
         final Lock lock = readLock();
@@ -598,6 +613,7 @@
 
     }
 
+    @Override
     public long rangeCountExactWithDeleted(final byte[] fromKey,
             final byte[] toKey) {
 
         final Lock lock = readLock();
@@ -614,12 +630,16 @@
 
     }
 
+    @Override
+    @SuppressWarnings("rawtypes")
     final public ITupleIterator rangeIterator() {
 
         return rangeIterator(null, null);
 
     }
 
+    @Override
+    @SuppressWarnings("rawtypes")
     public ITupleIterator rangeIterator(final byte[] fromKey, final byte[] toKey) {
 
         return rangeIterator(fromKey, toKey, 0/* capacity */,
@@ -636,6 +656,8 @@
      * from the underlying index. Likewise, the mutation methods on the iterator
      * will acquire the exclusive write lock.
      */
+    @Override
+    @SuppressWarnings("rawtypes")
     public ITupleIterator rangeIterator(final byte[] fromKey,
             final byte[] toKey, int capacity, int flags, final IFilter filter) {
 
@@ -681,7 +703,6 @@
      * for the {@link Lock}.
      * 
      * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
-     * 
      */
     private class ChunkedIterator<E> extends ChunkedLocalRangeIterator<E> {
 
@@ -757,8 +778,9 @@
 
         }
 
-    }
+    } // ChunkedIterator
 
+    @Override
     public Object submit(final byte[] key, final ISimpleIndexProcedure proc) {
 
         final Lock lock = lock(proc);
 
@@ -780,6 +802,8 @@
 
     }
 
+    @Override
+    @SuppressWarnings("rawtypes")
     public void submit(final byte[] fromKey, final byte[] toKey,
             final IKeyRangeIndexProcedure proc, final IResultHandler handler) {
 
@@ -802,7 +826,8 @@
 
     }
 
-    @SuppressWarnings("unchecked")
+    @Override
+    @SuppressWarnings({ "rawtypes", "unchecked" })
     public void submit(final int fromIndex, final int toIndex,
             final byte[][] keys, final byte[][] vals,
             final AbstractKeyArrayIndexProcedureConstructor ctor,
             final IResultHandler aggregator) {
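
The class javadoc for UnisolatedReadWriteIndex (excerpted in the hunks above) describes it as a wrapper that mediates concurrent readers and writers of an unisolated BTree through that canonical lock. A usage sketch, assuming the wrapper's public constructor takes the BTree; the method names follow the IIndex API shown in this diff:

    import com.bigdata.btree.BTree;
    import com.bigdata.btree.IIndex;
    import com.bigdata.btree.UnisolatedReadWriteIndex;

    public class SharedIndexAccess {

        /**
         * All access through the wrapper is mediated by the canonical
         * ReentrantReadWriteLock for the given BTree.
         */
        public static byte[] putAndGet(final BTree btree, final byte[] key,
                final byte[] val) {

            final IIndex ndx = new UnisolatedReadWriteIndex(btree);

            // insert() takes the exclusive write lock internally.
            ndx.insert(key, val);

            // lookup() takes the shared read lock; readers may run
            // concurrently with one another.
            return ndx.lookup(key);

        }

    }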
Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/data/IAbstractNodeData.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/data/IAbstractNodeData.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/data/IAbstractNodeData.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -34,7 +34,6 @@
  * Interface for low-level data access.
 * 
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
 */
 public interface IAbstractNodeData extends IDataRecordAccess {
 
@@ -59,6 +58,7 @@
      * @throws UnsupportedOperationException
      *             unless {@link #isCoded()} returns <code>true</code>.
      */
+    @Override
     AbstractFixedByteArrayBuffer data();
 
     /**

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/data/ILeafData.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/data/ILeafData.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/data/ILeafData.java	2014-09-02 16:01:51 UTC (rev 8636)
@@ -35,7 +35,6 @@
  * Interface for low-level data access for the leaves of a B+-Tree.
 * 
 * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
 */
 public interface ILeafData extends IAbstractNodeData, IKeysData {
 
@@ -124,6 +123,7 @@
     /**
      * Return <code>true</code> iff the leaf maintains version timestamps.
      */
+    @Override
     public boolean hasVersionTimestamps();
 
     /**

Modified: branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java
===================================================================
--- branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java	2014-09-02 15:01:44 UTC (rev 8635)
+++ branches/SESAME_2_7/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFuse... [truncated message content]